repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.image_list
|
python
|
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers
    '''
    images = {}
    providers = self.lookup_providers(lookup)
    if not providers:
        return images
    for alias, driver in providers:
        fun = '{0}.avail_images'.format(driver)
        if fun not in self.clouds:
            # This cloud module does not implement avail_images().
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the images information',
                driver, alias
            )
            continue
        images.setdefault(alias, {})
        try:
            # Tell the driver which provider it is acting for.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                images[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return images
|
Return a mapping of all image data for available providers
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L836-L872
|
[
"def lookup_providers(self, lookup):\n '''\n Get a dict describing the configured providers\n '''\n if lookup is None:\n lookup = 'all'\n if lookup == 'all':\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'There are no cloud providers configured.'\n )\n\n return providers\n\n if ':' in lookup:\n alias, driver = lookup.split(':')\n if alias not in self.opts['providers'] or \\\n driver not in self.opts['providers'][alias]:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. Available: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n if lookup in (alias, driver):\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. '\n 'Available selections: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n return providers\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
# Salt cloud configuration (providers, profiles, runtime options).
self.opts = opts
self.client = CloudClient(opts=self.opts)
# Loader mapping of '<driver>.<function>' names to driver callables.
self.clouds = salt.loader.clouds(self.opts)
# Drop providers whose driver failed to load or is misconfigured.
self.__filter_non_working_providers()
# Cache for map_providers()/map_providers_parallel(), keyed by query.
self.__cached_provider_queries = {}
def get_configured_providers(self):
    '''
    Return the configured providers
    '''
    configured = set()
    for alias, drivers in six.iteritems(self.opts['providers']):
        if len(drivers) > 1:
            # Ambiguous alias: qualify each entry as '<alias>:<driver>'.
            configured.update(
                '{0}:{1}'.format(alias, driver) for driver in drivers
            )
        else:
            configured.add(alias)
    return configured
def lookup_providers(self, lookup):
'''
Return the set of (alias, driver) tuples matching ``lookup``.

``lookup`` may be None/'all' (every configured provider), an
'<alias>:<driver>' pair, or a bare alias or driver name.
Raises SaltCloudSystemExit when nothing matches.

NOTE(review): indentation was lost in this extract, so the exact
nesting of the branches below is uncertain — verify against upstream.
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
# Explicit '<alias>:<driver>' pair; validate that it exists.
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
# Match a bare name against both aliases and driver names.
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return the set of (profile_alias, provider_name) tuples for the
configured profiles, optionally filtered by ``provider``.
Raises SaltCloudSystemExit when no cloud profiles are configured.

NOTE(review): indentation was lost in this extract, so statement
nesting below is uncertain — verify against upstream.
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
# 'provider' is expected in '<alias>:<driver>' form; a bare
# name would make the [1] index below raise — TODO confirm.
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs

:param str query: driver function suffix to call (e.g. 'list_nodes').
:param bool cached: when True, reuse a previously cached result for
this query instead of re-querying every provider.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
# Tell the driver which provider it is acting for.
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.

:param str query: driver function suffix to call (e.g. 'list_nodes').
:param bool cached: when True, reuse a previously cached result.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts['providers'] = self._optimize_providers(opts['providers'])
for alias, drivers in six.iteritems(opts['providers']):
# Make temp query for this driver to avoid overwrite next
this_query = query
for driver, details in six.iteritems(drivers):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
multiprocessing_data.append({
'fun': fun,
'opts': opts,
'query': this_query,
'alias': alias,
'driver': driver
})
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
# Worker pool size is capped at 10 processes.
pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
init_pool_worker)
parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
multiprocessing_data,
pool=pool)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
profile=None):
'''
Return a mapping of alias -> driver -> {vm_name: details} for the
requested VM names, as reported by the configured providers.

:param names: a VM name or an iterable of VM names to look up.
:param str query: driver query function suffix (e.g. 'list_nodes').
:param bool cached: passed through to map_providers_parallel().
:param profile: when set, only match providers referenced by the
named profile's 'provider' setting.
'''
if isinstance(names, six.string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in six.iteritems(mapped_providers):
for driver, vms in six.iteritems(drivers):
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif driver == 'ec2' and 'aws' in handled_drivers and \
'aws' in matches[handled_drivers['aws']] and \
vm_name in matches[handled_drivers['aws']]['aws']:
continue
elif driver == 'aws' and 'ec2' in handled_drivers and \
'ec2' in matches[handled_drivers['ec2']] and \
vm_name in matches[handled_drivers['ec2']]['ec2']:
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers

Regroups the configuration by driver and, for drivers that expose an
``optimize_providers`` function, replaces their provider data with
that function's output; other drivers' data passes through unchanged.
'''
new_providers = {}
provider_by_driver = {}
# Invert alias -> driver into driver -> alias so each driver can be
# optimized across all of its aliases at once.
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers
    '''
    locations = {}
    providers = self.lookup_providers(lookup)
    if not providers:
        return locations
    for alias, driver in providers:
        fun = '{0}.avail_locations'.format(driver)
        if fun not in self.clouds:
            # This cloud module does not implement avail_locations().
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the locations information',
                driver, alias
            )
            continue
        locations.setdefault(alias, {})
        try:
            # Tell the driver which provider it is acting for.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                locations[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return locations
def size_list(self, lookup='all'):
'''
Return a mapping of all size data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
# Tell the driver which provider it is acting for.
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def provider_list(self, lookup='all'):
'''
Return a mapping of the matched provider aliases to their drivers;
each leaf value is an empty dict (only the alias/driver layout
matters to callers).
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
    '''
    Return a mapping of all configured profiles
    '''
    profiles = {}
    matched = self.lookup_profiles(provider, lookup)
    if not matched:
        return profiles
    for alias, driver in matched:
        # Build the alias -> driver -> {} skeleton for each match.
        profiles.setdefault(alias, {}).setdefault(driver, {})
    return profiles
def create_all(self):
    '''
    Create/Verify the VMs in the VM data
    '''
    # One {profile_name: create_result} entry per configured profile.
    return [
        {profile_name: self.create(profile_config)}
        for profile_name, profile_config in six.iteritems(self.opts['profiles'])
    ]
def destroy(self, names, cached=False):
'''
Destroy the named VMs

Destroys the matched VMs (in parallel when self.opts['parallel'] is
set), then removes the corresponding minion keys, prompting the user
interactively when several globbed key files match one machine.
Returns a mapping of alias -> driver -> {name: destroy result};
names that could not be found are reported under 'Not Found'.
'''
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts['parallel']:
parallel_data.append({
'opts': self.opts,
'name': name,
'alias': alias,
'driver': driver,
})
# destroying in parallel
if self.opts['parallel'] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Destroying in parallel mode; '
'Cloud pool size: %s', pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj['alias']
driver = obj['driver']
name = obj['name']
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info('Destroying in non-parallel mode.')
for alias, driver, name in vms_to_destroy:
fun = '{0}.destroy'.format(driver)
# Tell the driver which provider it is acting for.
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
'name': name,
'profile': None,
'provider': ':'.join([alias, driver]),
'driver': driver
}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
# Minion key may be stored under the configured minion id rather
# than the VM name.
key_file = os.path.join(
self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
)
globbed_key_file = glob.glob('{0}.*'.format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and 'newname' in ret:
salt.utils.cloud.remove_key(
self.opts['pki_dir'], ret['newname']
)
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
continue
# Since we have globbed matches, there are probably some keys for which their minion
# configuration has append_domain set.
if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts['pki_dir'],
os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we can't also get the append_domain setting.
# And if we reached this point, we have several minion keys
# who's name starts with the machine name we're deleting.
# We need to ask one by one!?
print(
'There are several minion keys who\'s name starts '
'with \'{0}\'. We need to ask you which one should be '
'deleted:'.format(
name
)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(' {0}: {1}'.format(
idx, os.path.basename(filename)
))
selection = input(
'Which minion key should be deleted(number)? '
)
try:
selection = int(selection)
except ValueError:
print(
'\'{0}\' is not a valid selection.'.format(selection)
)
try:
filename = os.path.basename(
globbed_key_file.pop(selection)
)
except Exception:
# Invalid index (or non-int selection); re-prompt.
continue
delete = input(
'Delete \'{0}\'? [Y/n]? '.format(filename)
)
if delete == '' or delete.lower().startswith('y'):
salt.utils.cloud.remove_key(
self.opts['pki_dir'], filename
)
print('Deleted \'{0}\''.format(filename))
break
print('Did not delete \'{0}\''.format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
'The following VM\'s were not found: {0}'.format(
', '.join(names)
)
)
elif names and processed:
processed['Not Found'] = names
elif not processed:
raise SaltCloudSystemExit('No machines were destroyed!')
return processed
def reboot(self, names):
'''
Reboot the named VMs

Returns a list of {name: reboot_result} dicts.
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
# Group the requested names by the provider (pmap key) hosting them.
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
# NOTE(review): 'prov' here is the pmap key (the provider alias);
# if the alias differs from the driver name, '<alias>.reboot' will
# not be present in self.clouds and this lookup would raise
# KeyError — TODO confirm against upstream.
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
def create(self, vm_, local_master=True):
'''
Create a single VM

:param dict vm_: the merged VM configuration; must contain 'name'
and 'provider' in '<alias>:<driver>' form.
:param bool local_master: when True (and deploying), accept the new
minion key on the local master.
Returns the driver's create() output (possibly extended with 'ret'
from start_action), or None when the driver is unavailable.
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
# Key is registered under the fully qualified minion id.
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
# NOTE(review): 'func' is recomputed here but always equals 'fun'
# ('<driver>.create') from above — redundant, not a bug.
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
# NOTE(review): this rebinds and then update()s the shared
# salt.config.DEFAULT_MASTER_OPTS dict in place instead of
# copying it first — mutates module-level state.
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
    '''
    Create vm config.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    '''
    # Layer the configuration: main < provider < profile < overrides.
    merged = salt.utils.dictupdate.update(main.copy(), provider)
    merged = salt.utils.dictupdate.update(merged, profile)
    merged.update(overrides)
    merged['name'] = name
    return merged
def extras(self, extra_):
    '''
    Extra actions
    '''
    result = {}
    alias, driver = extra_['provider'].split(':')
    fun = '{0}.{1}'.format(driver, extra_['action'])
    if fun not in self.clouds:
        # The driver does not expose the requested action.
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            extra_['name'], extra_['provider'], driver
        )
        return
    try:
        # Tell the driver which provider it is acting for.
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=extra_['provider']
        ):
            result = self.clouds[fun](**extra_)
    except KeyError as exc:
        log.exception(
            'Failed to perform %s.%s on %s. '
            'Configuration value %s needs to be set',
            extra_['provider'], extra_['action'], extra_['name'], exc
        )
    return result
def run_profile(self, profile, names, vm_overrides=None):
'''
Parse over the options passed on the command line and determine how to
handle them

:param str profile: name of a profile in self.opts['profiles'].
:param names: iterable of VM names to create from the profile.
:param dict vm_overrides: per-VM configuration overrides.
Returns a mapping of name -> create result / error dict.
'''
if profile not in self.opts['profiles']:
msg = 'Profile {0} is not defined'.format(profile)
log.error(msg)
return {'Error': msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
# Load the main cloud config file; fall back to an empty config on
# any read/parse problem.
try:
with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except IOError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts['profiles'][profile]
vms = {}
# Flatten the provider map into {vm_name: details} for existence checks.
for prov, val in six.iteritems(mapped_providers):
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]['provider'] = prov
vms[node]['driver'] = prov_name
alias, driver = profile_details['provider'].split(':')
provider_details = self.opts['providers'][alias][driver].copy()
del provider_details['profiles']
for name in names:
if name in vms:
# Refuse to create a VM that already exists somewhere.
prov = vms[name]['provider']
driv = vms[name]['driver']
msg = '{0} already exists under {1}:{2}'.format(
name, prov, driv
)
log.error(msg)
ret[name] = {'Error': msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts['parallel']:
process = multiprocessing.Process(
target=self.create,
args=(vm_,)
)
process.start()
ret[name] = {
'Provisioning': 'VM being provisioned in parallel. '
'PID: {0}'.format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {'Error': 'Failed to deploy VM'}
if len(names) == 1:
raise SaltCloudSystemExit('Failed to deploy VM')
continue
if self.opts.get('show_deploy_args', False) is False:
ret[name].pop('deploy_kwargs', None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {'Error': str(exc)}
return ret
def do_action(self, names, kwargs):
'''
Perform an action on a VM which may be specific to this cloud provider

:param names: iterable of VM names to act on (self.opts['action']
names the driver action to run).
:param dict kwargs: extra arguments passed to the driver action.
Returns a mapping of alias -> driver -> {name: result}, plus
'Invalid Actions' / 'Not Found' / 'Not Actioned/Not Running' keys
for names that could not be actioned.
'''
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in six.iteritems(self.map_providers_parallel()):
if not names:
break
for driver, vms in six.iteritems(drivers):
if not names:
break
valid_function = True
fun = '{0}.{1}'.format(driver, self.opts['action'])
if fun not in self.clouds:
log.info('\'%s()\' is not available. Not actioning...', fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
# The VM may have been requested by its 'id' rather than
# its display name.
if 'id' in vm_details and vm_details['id'] in names:
vm_name = vm_details['id']
else:
log.debug(
'vm:%s in provider:%s is not in name '
'list:\'%s\'', vm_name, driver, names
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call='action'
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call='action'
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret['Invalid Actions'] = invalid_functions
invalid_func_vms = set()
for key, val in six.iteritems(invalid_functions):
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret['Not Found'] = list(missing_vms)
ret['Not Actioned/Not Running'] = list(names)
if not names:
return ret
# Don't return missing VM information for invalid functions until after we've had a
# Chance to return successful actions. If a function is valid for one driver, but
# Not another, we want to make sure the successful action is returned properly.
if missing_vms:
return ret
# If we reach this point, the Not Actioned and Not Found lists will be the same,
# But we want to list both for clarity/consistency with the invalid functions lists.
ret['Not Actioned/Not Running'] = list(names)
ret['Not Found'] = list(names)
return ret
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider

:param str prov: provider lookup string (alias, driver, or
'<alias>:<driver>'); must resolve to exactly one provider.
:param str func: the driver function name to call.
:param dict kwargs: keyword arguments forwarded to the driver call.
Returns {alias: {driver: result}}.
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one results matched \'{0}\'. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
'not define the function \'{2}\''.format(alias, driver, func)
)
log.debug(
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
# Tell the driver which provider it is acting for.
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
}
def __filter_non_working_providers(self):
'''
Remove any mis-configured cloud providers from the available listing

A provider is removed when its driver did not load (no
get_configured_provider function) or when that function returns
False. Aliases left with no drivers are removed entirely.
'''
# Iterate over copies so entries can be popped during iteration.
for alias, drivers in six.iteritems(self.opts['providers'].copy()):
for driver in drivers.copy():
fun = '{0}.get_configured_provider'.format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias, could not be loaded. '
'Please check your provider configuration files and '
'ensure all required dependencies are installed '
'for the \'%s\' driver.\n'
'In rare cases, this could indicate the \'%s()\' '
'function could not be found.\nRemoving \'%s\' from '
'the available providers list',
driver, alias, driver, fun, driver
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
continue
# Tell the driver which provider it is acting for.
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias is not properly '
'configured. Removing it from the available '
'providers list.', driver, alias
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.provider_list
|
python
|
def provider_list(self, lookup='all'):
    '''
    Return a mapping of the matched provider aliases to their drivers;
    each leaf value is an empty dict (only the layout matters).
    '''
    providers = {}
    matched = self.lookup_providers(lookup)
    if not matched:
        return providers
    for alias, driver in matched:
        providers.setdefault(alias, {}).setdefault(driver, {})
    return providers
|
Return a mapping of all image data for available providers
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L912-L926
|
[
"def lookup_providers(self, lookup):\n '''\n Get a dict describing the configured providers\n '''\n if lookup is None:\n lookup = 'all'\n if lookup == 'all':\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'There are no cloud providers configured.'\n )\n\n return providers\n\n if ':' in lookup:\n alias, driver = lookup.split(':')\n if alias not in self.opts['providers'] or \\\n driver not in self.opts['providers'][alias]:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. Available: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n if lookup in (alias, driver):\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. '\n 'Available selections: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n return providers\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
# Salt cloud configuration (providers, profiles, runtime options).
self.opts = opts
self.client = CloudClient(opts=self.opts)
# Loader mapping of '<driver>.<function>' names to driver callables.
self.clouds = salt.loader.clouds(self.opts)
# Drop providers whose driver failed to load or is misconfigured.
self.__filter_non_working_providers()
# Cache for map_providers()/map_providers_parallel(), keyed by query.
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
    def lookup_providers(self, lookup):
        '''
        Resolve *lookup* to a set of ``(alias, driver)`` tuples describing
        the configured providers.

        lookup
            ``None``/``'all'`` for everything, an ``alias:driver`` pair for
            one exact provider, or a bare alias/driver name to match either.

        Raises SaltCloudSystemExit when nothing matches.
        '''
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            providers = set()
            for alias, drivers in six.iteritems(self.opts['providers']):
                for driver in drivers:
                    providers.add((alias, driver))
            if not providers:
                raise SaltCloudSystemExit(
                    'There are no cloud providers configured.'
                )
            return providers
        if ':' in lookup:
            # 'alias:driver' form: validate the exact pair exists before
            # falling through to the generic name matching below.
            alias, driver = lookup.split(':')
            if alias not in self.opts['providers'] or \
                    driver not in self.opts['providers'][alias]:
                raise SaltCloudSystemExit(
                    'No cloud providers matched \'{0}\'. Available: {1}'.format(
                        lookup, ', '.join(self.get_configured_providers())
                    )
                )
        # Bare name: match either the alias or the driver name.
        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                if lookup in (alias, driver):
                    providers.add((alias, driver))
        if not providers:
            raise SaltCloudSystemExit(
                'No cloud providers matched \'{0}\'. '
                'Available selections: {1}'.format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
        return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return the set of ``(profile_alias, provider_name)`` tuples for the
        configured profiles.

        provider
            ``'all'`` (or ``None``) for every profile, otherwise only
            profiles whose provider matches by alias or driver name.
        lookup
            Only ``'all'`` (or ``None``) is handled here; for any other
            value the function falls through and implicitly returns
            ``None`` — NOTE(review): looks unintended, confirm callers.
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # assumes the profile's provider is 'alias:driver' —
                    # a bare name would raise IndexError here; TODO confirm
                    # upstream always normalizes this.
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        query
            Driver function name invoked per provider (e.g. ``list_nodes``).
        cached
            When True, reuse a previously computed result for this query.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Expose the active provider name to the driver function
                    # for the duration of the call.
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        Same as map_providers but query in parallel (one multiprocessing
        worker per provider, capped at 10).
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        opts = self.opts.copy()
        multiprocessing_data = []
        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'
                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output
        data_count = len(multiprocessing_data)
        # Worker pool: one per provider, but never more than 10.
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details
        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of running VMs whose names are listed in *names*,
        as ``{alias: {driver: {vm_name: details}}}``.

        names
            A single VM name or an iterable of names.
        profile
            When given, only match instances whose provider alias matches
            the profile's provider — avoids duplicate hits when several
            providers report the same instances.
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
    def _optimize_providers(self, providers):
        '''
        Return an optimized mapping of available providers.

        Groups provider configs by driver name, then lets each driver that
        implements ``optimize_providers`` rewrite its own group; drivers
        without that hook keep their configs unchanged.
        '''
        new_providers = {}
        provider_by_driver = {}
        # Invert {alias: {driver: data}} into {driver: {alias: data}}.
        for alias, driver in six.iteritems(providers):
            for name, data in six.iteritems(driver):
                if name not in provider_by_driver:
                    provider_by_driver[name] = {}
                provider_by_driver[name][alias] = data
        for driver, providers_data in six.iteritems(provider_by_driver):
            fun = '{0}.optimize_providers'.format(driver)
            if fun not in self.clouds:
                log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
                # No optimizer hook: carry the configs through untouched.
                for name, prov_data in six.iteritems(providers_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
                continue
            new_data = self.clouds[fun](providers_data)
            if new_data:
                for name, prov_data in six.iteritems(new_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
        return new_providers
    def location_list(self, lookup='all'):
        '''
        Return a mapping of all location data for available providers

        Drivers lacking ``avail_locations`` are skipped with a debug
        message; per-driver query failures are logged and that driver's
        entry is left unset.
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_locations'.format(driver)
            if fun not in self.clouds:
                # The capability to gather locations is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the locations information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def size_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs.

        Runs the per-driver ``destroy`` either in parallel (when
        ``opts['parallel']``) or serially, then removes the matching minion
        keys from the master's pki dir, prompting interactively when several
        globbed key files match one VM name.

        Returns ``{alias: {driver: {name: result}}}``; names that were
        requested but never found are reported under ``'Not Found'``.
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })
        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)
            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)
            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)
            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)
        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)
        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                continue
            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            globbed_key_file = glob.glob('{0}.*'.format(key_file))
            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue
            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue
            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue
            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )
                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue
                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break
                print('Did not delete \'{0}\''.format(filename))
                break
        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )
        elif names and processed:
            processed['Not Found'] = names
        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')
        return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
    def extras(self, extra_):
        '''
        Run an extra (driver-specific) action described by *extra_*.

        extra_
            Dict carrying at least ``provider`` (``alias:driver``),
            ``action`` and ``name``; the whole dict is passed to the driver
            function as keyword arguments.

        Returns the driver's output, an empty dict on a KeyError, or
        ``None`` when the driver does not implement the action.
        '''
        output = {}
        alias, driver = extra_['provider'].split(':')
        fun = '{0}.{1}'.format(driver, extra_['action'])
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                extra_['name'], extra_['provider'], driver
            )
            return
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=extra_['provider']
            ):
                output = self.clouds[fun](**extra_)
        except KeyError as exc:
            log.exception(
                'Failed to perform %s.%s on %s. '
                'Configuration value %s needs to be set',
                extra_['provider'], extra_['action'], extra_['name'], exc
            )
        return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Create the VMs in *names* using the configured *profile*.

        Skips names that already exist on a provider; when
        ``opts['parallel']`` is set, each VM is created in its own process
        and only a provisioning notice is returned for it.

        Returns ``{name: result_or_error_dict}``.
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}
        ret = {}
        if not vm_overrides:
            vm_overrides = {}
        # Load the raw top-level cloud config; tolerate a missing or empty
        # file by falling back to an empty dict.
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}
        if main_cloud_config is None:
            main_cloud_config = {}
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        vms = {}
        # Flatten the provider map into {vm_name: details} for the
        # existence check below.
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue
            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue
            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}
        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        The action name comes from ``self.opts['action']``; it is invoked as
        ``<driver>.<action>(vm_name, [kwargs], call='action')`` for every
        named VM found. Drivers lacking the action are collected under
        ``'Invalid Actions'``; names never found are reported as
        ``'Not Found'`` / ``'Not Actioned/Not Running'``.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)
        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # The key may be the provider's internal id rather
                        # than the requested name; fall back to 'id'.
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue
                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}
                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)
                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)
        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))
            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)
            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)
        if not names:
            return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        prov
            A provider lookup string; must resolve to exactly one
            ``(alias, driver)`` pair or SaltCloudSystemExit is raised.
        func
            The driver-level function name, invoked with ``call='function'``.

        Returns ``{alias: {driver: result}}``.
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )
        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )
        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        Iterates over copies of the provider mappings so entries can be
        popped from ``self.opts['providers']`` while looping. A provider is
        dropped when its driver module did not load (no
        ``get_configured_provider`` function) or when that function
        reports the configuration as invalid by returning False.
        '''
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    # NOTE(review): alias was just indexed above, so this
                    # membership check looks redundant — confirm intent.
                    if alias not in self.opts['providers']:
                        continue
                    # Drop the alias entirely once its last driver is gone.
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.profile_list
|
python
|
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
|
Return a mapping of all configured profiles
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L928-L943
|
[
"def lookup_profiles(self, provider, lookup):\n '''\n Return a dictionary describing the configured profiles\n '''\n if provider is None:\n provider = 'all'\n if lookup is None:\n lookup = 'all'\n\n if lookup == 'all':\n profiles = set()\n provider_profiles = set()\n for alias, info in six.iteritems(self.opts['profiles']):\n providers = info.get('provider')\n\n if providers:\n given_prov_name = providers.split(':')[0]\n salt_prov_name = providers.split(':')[1]\n if given_prov_name == provider:\n provider_profiles.add((alias, given_prov_name))\n elif salt_prov_name == provider:\n provider_profiles.add((alias, salt_prov_name))\n profiles.add((alias, given_prov_name))\n\n if not profiles:\n raise SaltCloudSystemExit(\n 'There are no cloud profiles configured.'\n )\n\n if provider != 'all':\n return provider_profiles\n\n return profiles\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
self.opts = opts
self.client = CloudClient(opts=self.opts)
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs

    query -- per-driver query function name (without the driver
        prefix), e.g. 'list_nodes'.
    cached -- when True, reuse the previous result for this query.

    Returns ``{alias: {driver: <query result>}}``; drivers that fail
    are mapped to an empty list.
    '''
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]
    pmap = {}
    for alias, drivers in six.iteritems(self.opts['providers']):
        for driver, details in six.iteritems(drivers):
            fun = '{0}.{1}'.format(driver, query)
            if fun not in self.clouds:
                log.error('Public cloud provider %s is not available', driver)
                continue
            if alias not in pmap:
                pmap[alias] = {}
            try:
                # Expose the active provider name to the driver module
                # for the duration of the call.
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    pmap[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.debug(
                    'Failed to execute \'%s()\' while querying for '
                    'running nodes: %s', fun, err,
                    exc_info_on_loglevel=logging.DEBUG
                )
                # Failed to communicate with the provider, don't list any
                # nodes
                pmap[alias][driver] = []
    self.__cached_provider_queries[query] = pmap
    return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs

    Same as map_providers but query in parallel.

    query -- per-driver query function name (without driver prefix).
    cached -- when True, reuse the previous result for this query.

    Returns ``{alias: {driver: <query result>}}``.
    '''
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]
    opts = self.opts.copy()
    multiprocessing_data = []
    # Optimize Providers
    opts['providers'] = self._optimize_providers(opts['providers'])
    for alias, drivers in six.iteritems(opts['providers']):
        # Make temp query for this driver to avoid overwrite next
        this_query = query
        for driver, details in six.iteritems(drivers):
            # If driver has function list_nodes_min, just replace it
            # with query param to check existing vms on this driver
            # for minimum information, Otherwise still use query param.
            if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                this_query = 'list_nodes_min'
            fun = '{0}.{1}'.format(driver, this_query)
            if fun not in self.clouds:
                log.error('Public cloud provider %s is not available', driver)
                continue
            multiprocessing_data.append({
                'fun': fun,
                'opts': opts,
                'query': this_query,
                'alias': alias,
                'driver': driver
            })
    output = {}
    if not multiprocessing_data:
        return output
    data_count = len(multiprocessing_data)
    # One worker per provider query, capped at 10 processes.
    # NOTE(review): the pool is handed to enter_mainloop(); presumably
    # it is closed/joined there -- confirm, otherwise it leaks.
    pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                init_pool_worker)
    parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                   multiprocessing_data,
                                   pool=pool)
    for alias, driver, details in parallel_pmap:
        if not details:
            # There's no providers details?! Skip it!
            continue
        if alias not in output:
            output[alias] = {}
        output[alias][driver] = details
    self.__cached_provider_queries[query] = output
    return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
                         profile=None):
    '''
    Return the subset of running VMs whose names appear in ``names``.

    names -- a VM name or iterable of VM names.
    query -- per-driver query passed to map_providers_parallel().
    cached -- reuse cached provider query results when True.
    profile -- when given, only return instances from the provider
        configured for that profile.

    Returns ``{alias: {driver: {vm_name: details}}}``.
    '''
    if isinstance(names, six.string_types):
        names = [names]
    matches = {}
    handled_drivers = {}
    mapped_providers = self.map_providers_parallel(query, cached=cached)
    for alias, drivers in six.iteritems(mapped_providers):
        for driver, vms in six.iteritems(drivers):
            if driver not in handled_drivers:
                handled_drivers[driver] = alias
            # When a profile is specified, only return an instance
            # that matches the provider specified in the profile.
            # This solves the issues when many providers return the
            # same instance. For example there may be one provider for
            # each availability zone in amazon in the same region, but
            # the search returns the same instance for each provider
            # because amazon returns all instances in a region, not
            # availability zone.
            if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                continue
            for vm_name, details in six.iteritems(vms):
                # XXX: The logic below can be removed once the aws driver
                # is removed
                if vm_name not in names:
                    continue
                elif driver == 'ec2' and 'aws' in handled_drivers and \
                        'aws' in matches[handled_drivers['aws']] and \
                        vm_name in matches[handled_drivers['aws']]['aws']:
                    # Already reported by the legacy aws driver.
                    continue
                elif driver == 'aws' and 'ec2' in handled_drivers and \
                        'ec2' in matches[handled_drivers['ec2']] and \
                        vm_name in matches[handled_drivers['ec2']]['ec2']:
                    # Already reported by the ec2 driver.
                    continue
                if alias not in matches:
                    matches[alias] = {}
                if driver not in matches[alias]:
                    matches[alias][driver] = {}
                matches[alias][driver][vm_name] = details
    return matches
def _optimize_providers(self, providers):
    '''
    Return an optimized mapping of available providers

    Regroups the ``{alias: {driver: data}}`` mapping by driver and,
    for drivers exposing an ``optimize_providers()`` function, lets
    the driver collapse/rewrite its own provider entries.  Drivers
    without that function are passed through unchanged.
    '''
    new_providers = {}
    provider_by_driver = {}
    # Invert the mapping: driver -> {alias: data}.
    for alias, driver in six.iteritems(providers):
        for name, data in six.iteritems(driver):
            if name not in provider_by_driver:
                provider_by_driver[name] = {}
            provider_by_driver[name][alias] = data
    for driver, providers_data in six.iteritems(provider_by_driver):
        fun = '{0}.optimize_providers'.format(driver)
        if fun not in self.clouds:
            log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
            # No optimizer: copy the entries through verbatim.
            for name, prov_data in six.iteritems(providers_data):
                if name not in new_providers:
                    new_providers[name] = {}
                new_providers[name][driver] = prov_data
            continue
        new_data = self.clouds[fun](providers_data)
        if new_data:
            for name, prov_data in six.iteritems(new_data):
                if name not in new_providers:
                    new_providers[name] = {}
                new_providers[name][driver] = prov_data
    return new_providers
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers

    lookup -- provider selector forwarded to lookup_providers().
    Returns ``{alias: {driver: <avail_locations() result>}}``.
    '''
    data = {}
    matched = self.lookup_providers(lookup)
    if not matched:
        return data
    for alias, driver in matched:
        fun = '{0}.avail_locations'.format(driver)
        if fun not in self.clouds:
            # This driver cannot enumerate locations; skip it.
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the locations information',
                driver, alias
            )
            continue
        data.setdefault(alias, {})
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            # Keep going; a single broken provider must not abort the
            # whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers

    lookup -- provider selector forwarded to lookup_providers().
    Returns ``{alias: {driver: <avail_images() result>}}``; providers
    whose driver lacks ``avail_images`` are skipped.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_images'.format(driver)
        if fun not in self.clouds:
            # The capability to gather images is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the images information',
                driver, alias
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            # One failing provider must not abort the whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def size_list(self, lookup='all'):
    '''
    Return a mapping of all size data for available providers

    (Docstring fixed: this lists sizes, not images -- the previous
    text was copy-pasted from image_list.)

    lookup -- provider selector forwarded to lookup_providers().
    Returns ``{alias: {driver: <avail_sizes() result>}}``.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_sizes'.format(driver)
        if fun not in self.clouds:
            # The capability to gather sizes is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the sizes information',
                driver, alias
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            # One failing provider must not abort the whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def provider_list(self, lookup='all'):
    '''
    Return a mapping of the matched providers, keyed by alias and then
    by driver name; the leaf values are empty dicts.
    '''
    data = {}
    matched = self.lookup_providers(lookup)
    if not matched:
        return data
    for alias, driver in matched:
        data.setdefault(alias, {}).setdefault(driver, {})
    return data
def create_all(self):
    '''
    Create/Verify the VMs in the VM data

    Returns a list of ``{vm_name: create() result}`` dicts, one entry
    per configured profile.
    '''
    return [
        {vm_name: self.create(vm_details)}
        for vm_name, vm_details in six.iteritems(self.opts['profiles'])
    ]
def destroy(self, names, cached=False):
    '''
    Destroy the named VMs

    names -- iterable of VM names to destroy.
    cached -- forwarded to get_running_by_names(); reuse cached
        provider query results when True.

    Returns ``{alias: {driver: {name: result}}}`` plus a 'Not Found'
    key for names that matched no running VM.  After destroying, the
    corresponding minion keys are removed from the local master,
    prompting interactively when several globbed keys match.
    Raises ``SaltCloudSystemExit`` when nothing was destroyed.
    '''
    processed = {}
    names = set(names)
    matching = self.get_running_by_names(names, cached=cached)
    vms_to_destroy = set()
    parallel_data = []
    for alias, drivers in six.iteritems(matching):
        for driver, vms in six.iteritems(drivers):
            for name in vms:
                if name in names:
                    vms_to_destroy.add((alias, driver, name))
                    if self.opts['parallel']:
                        parallel_data.append({
                            'opts': self.opts,
                            'name': name,
                            'alias': alias,
                            'driver': driver,
                        })
    # destroying in parallel
    if self.opts['parallel'] and parallel_data:
        # set the pool size based on configuration or default to
        # the number of machines we're destroying
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Destroying in parallel mode; '
                 'Cloud pool size: %s', pool_size)
        # kick off the parallel destroy
        output_multip = enter_mainloop(
            _destroy_multiprocessing, parallel_data, pool_size=pool_size)
        # massage the multiprocessing output a bit
        ret_multip = {}
        for obj in output_multip:
            ret_multip.update(obj)
        # build up a data structure similar to what the non-parallel
        # destroy uses
        for obj in parallel_data:
            alias = obj['alias']
            driver = obj['driver']
            name = obj['name']
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret_multip[name]
            if name in names:
                names.remove(name)
    # not destroying in parallel
    else:
        log.info('Destroying in non-parallel mode.')
        for alias, driver, name in vms_to_destroy:
            fun = '{0}.destroy'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                ret = self.clouds[fun](name)
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret
            if name in names:
                names.remove(name)
    # now the processed data structure contains the output from either
    # the parallel or non-parallel destroy and we should finish up
    # with removing minion keys if necessary
    for alias, driver, name in vms_to_destroy:
        ret = processed[alias][driver][name]
        if not ret:
            # Destroy reported failure; keep the key in place.
            continue
        vm_ = {
            'name': name,
            'profile': None,
            'provider': ':'.join([alias, driver]),
            'driver': driver
        }
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        key_file = os.path.join(
            self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
        )
        globbed_key_file = glob.glob('{0}.*'.format(key_file))
        if not os.path.isfile(key_file) and not globbed_key_file:
            # There's no such key file!? It might have been renamed
            if isinstance(ret, dict) and 'newname' in ret:
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], ret['newname']
                )
            continue
        if os.path.isfile(key_file) and not globbed_key_file:
            # Single key entry. Remove it!
            salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
            continue
        # Since we have globbed matches, there are probably some keys for which their minion
        # configuration has append_domain set.
        if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
            # Single entry, let's remove it!
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'],
                os.path.basename(globbed_key_file[0])
            )
            continue
        # Since we can't get the profile or map entry used to create
        # the VM, we can't also get the append_domain setting.
        # And if we reached this point, we have several minion keys
        # who's name starts with the machine name we're deleting.
        # We need to ask one by one!?
        print(
            'There are several minion keys who\'s name starts '
            'with \'{0}\'. We need to ask you which one should be '
            'deleted:'.format(
                name
            )
        )
        while True:
            for idx, filename in enumerate(globbed_key_file):
                print(' {0}: {1}'.format(
                    idx, os.path.basename(filename)
                ))
            selection = input(
                'Which minion key should be deleted(number)? '
            )
            try:
                selection = int(selection)
            except ValueError:
                print(
                    '\'{0}\' is not a valid selection.'.format(selection)
                )
            # A non-integer selection falls through here and is caught
            # by the broad except below, restarting the prompt loop.
            try:
                filename = os.path.basename(
                    globbed_key_file.pop(selection)
                )
            except Exception:
                continue
            delete = input(
                'Delete \'{0}\'? [Y/n]? '.format(filename)
            )
            if delete == '' or delete.lower().startswith('y'):
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], filename
                )
                print('Deleted \'{0}\''.format(filename))
                break
            print('Did not delete \'{0}\''.format(filename))
            break
    if names and not processed:
        # These machines were asked to be destroyed but could not be found
        raise SaltCloudSystemExit(
            'The following VM\'s were not found: {0}'.format(
                ', '.join(names)
            )
        )
    elif names and processed:
        processed['Not Found'] = names
    elif not processed:
        raise SaltCloudSystemExit('No machines were destroyed!')
    return processed
def reboot(self, names):
    '''
    Reboot the named VMs

    names -- iterable of VM names to reboot.
    Returns a list of ``{vm_name: reboot result}`` dicts.
    '''
    ret = []
    pmap = self.map_providers_parallel()
    acts = {}
    # BUGFIX: map_providers_parallel() returns
    # {alias: {driver: {vm_name: details}}}, but the old code iterated
    # only one level deep, comparing *driver names* against VM names
    # and building the reboot function from the alias -- so matching
    # VMs were never found.  Walk both levels and key the pending
    # actions by (alias, driver).
    for alias, drivers in six.iteritems(pmap):
        for driver, nodes in six.iteritems(drivers):
            acts.setdefault((alias, driver), [])
            for node in nodes:
                if node in names:
                    acts[(alias, driver)].append(node)
    for (alias, driver), names_ in six.iteritems(acts):
        # The reboot function lives on the driver module, not the alias.
        fun = '{0}.reboot'.format(driver)
        for name in names_:
            # Expose the active provider to the driver, consistent with
            # the other per-driver calls in this class.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                ret.append({
                    name: self.clouds[fun](name)
                })
    return ret
def create(self, vm_, local_master=True):
    '''
    Create a single VM

    vm_ -- the fully-merged VM configuration dict; must contain at
        least 'name' and 'provider' ('<alias>:<driver>').  This dict is
        mutated in place (keys, script, etc. are added).
    local_master -- when True and deploy is enabled, pre-accept the
        minion key on the local master.

    Returns the driver's create() output dict (possibly extended with
    a 'ret' key from start_action), or None when the driver is
    unavailable.
    '''
    output = {}
    minion_dict = salt.config.get_cloud_config_value(
        'minion', vm_, self.opts, default={}
    )
    alias, driver = vm_['provider'].split(':')
    fun = '{0}.create'.format(driver)
    if fun not in self.clouds:
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            vm_['name'], vm_['provider'], driver
        )
        return
    deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
    make_master = salt.config.get_cloud_config_value(
        'make_master',
        vm_,
        self.opts
    )
    if deploy:
        if not make_master and 'master' not in minion_dict:
            log.warning(
                'There\'s no master defined on the \'%s\' VM settings.',
                vm_['name']
            )
        # Generate a minion keypair unless one was supplied.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'%s\'', vm_['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['pub_key'] = pub
            vm_['priv_key'] = priv
    else:
        # Note(pabelanger): We still reference pub_key and priv_key when
        # deploy is disabled.
        vm_['pub_key'] = None
        vm_['priv_key'] = None
    key_id = minion_dict.get('id', vm_['name'])
    domain = vm_.get('domain')
    if vm_.get('use_fqdn') and domain:
        minion_dict['append_domain'] = domain
    if 'append_domain' in minion_dict:
        key_id = '.'.join([key_id, minion_dict['append_domain']])
    if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
        log.debug('Generating the master keys for \'%s\'', vm_['name'])
        master_priv, master_pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize',
                vm_,
                self.opts
            )
        )
        vm_['master_pub'] = master_pub
        vm_['master_pem'] = master_priv
    if local_master is True and deploy is True:
        # Accept the key on the local master
        salt.utils.cloud.accept_key(
            self.opts['pki_dir'], vm_['pub_key'], key_id
        )
    vm_['os'] = salt.config.get_cloud_config_value(
        'script',
        vm_,
        self.opts
    )
    try:
        vm_['inline_script'] = salt.config.get_cloud_config_value(
            'inline_script',
            vm_,
            self.opts
        )
    except KeyError:
        pass
    try:
        alias, driver = vm_['provider'].split(':')
        func = '{0}.create'.format(driver)
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            output = self.clouds[func](vm_)
        if output is not False and 'sync_after_install' in self.opts:
            if self.opts['sync_after_install'] not in (
                    'all', 'modules', 'states', 'grains'):
                log.error('Bad option for sync_after_install')
                return output
            # A small pause helps the sync work more reliably
            time.sleep(3)
            start = int(time.time())
            while int(time.time()) < start + 60:
                # We'll try every <timeout> seconds, up to a minute
                mopts_ = salt.config.DEFAULT_MASTER_OPTS
                conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                mopts_.update(
                    salt.config.master_config(
                        os.path.join(conf_path,
                                     'master')
                    )
                )
                client = salt.client.get_local_client(mopts=mopts_)
                ret = client.cmd(
                    vm_['name'],
                    'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                    timeout=self.opts['timeout']
                )
                if ret:
                    log.info(
                        six.u('Synchronized the following dynamic modules: '
                              ' {0}').format(ret)
                    )
                    break
    except KeyError as exc:
        log.exception(
            'Failed to create VM %s. Configuration value %s needs '
            'to be set', vm_['name'], exc
        )
    # If it's a map then we need to respect the 'requires'
    # so we do it later
    try:
        opt_map = self.opts['map']
    except KeyError:
        opt_map = False
    if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
        log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
        client = salt.client.get_local_client(mopts=self.opts)
        action_out = client.cmd(
            vm_['name'],
            self.opts['start_action'],
            timeout=self.opts['timeout'] * 60
        )
        output['ret'] = action_out
    return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
    '''
    Build the effective configuration for a single VM by layering the
    main cloud config, the provider config, the profile config and the
    per-VM overrides, from lowest to highest precedence.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    '''
    merged = main.copy()
    for layer in (provider, profile):
        merged = salt.utils.dictupdate.update(merged, layer)
    merged.update(overrides)
    merged['name'] = name
    return merged
def extras(self, extra_):
    '''
    Extra actions

    extra_ -- dict with at least 'provider' ('<alias>:<driver>'),
    'action' and 'name' keys; the whole dict is forwarded to the
    driver function as keyword arguments.
    '''
    result = {}
    alias, driver = extra_['provider'].split(':')
    fun = '{0}.{1}'.format(driver, extra_['action'])
    if fun not in self.clouds:
        # The driver does not implement this action.
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            extra_['name'], extra_['provider'], driver
        )
        return
    try:
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=extra_['provider']
        ):
            result = self.clouds[fun](**extra_)
    except KeyError as exc:
        log.exception(
            'Failed to perform %s.%s on %s. '
            'Configuration value %s needs to be set',
            extra_['provider'], extra_['action'], extra_['name'], exc
        )
    return result
def run_profile(self, profile, names, vm_overrides=None):
    '''
    Parse over the options passed on the command line and determine how to
    handle them

    profile -- name of the profile to instantiate; must exist in
        ``self.opts['profiles']``.
    names -- iterable of VM names to create from that profile.
    vm_overrides -- optional per-VM config overrides dict.

    Returns ``{name: result-or-error-dict}``.
    '''
    if profile not in self.opts['profiles']:
        msg = 'Profile {0} is not defined'.format(profile)
        log.error(msg)
        return {'Error': msg}
    ret = {}
    if not vm_overrides:
        vm_overrides = {}
    # Load the raw main cloud config so vm_config() can layer it in;
    # fall back to an empty dict on any read/parse problem.
    try:
        with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
            main_cloud_config = salt.utils.yaml.safe_load(mcc)
        if not main_cloud_config:
            main_cloud_config = {}
    except KeyError:
        main_cloud_config = {}
    except IOError:
        main_cloud_config = {}
    if main_cloud_config is None:
        main_cloud_config = {}
    mapped_providers = self.map_providers_parallel()
    profile_details = self.opts['profiles'][profile]
    vms = {}
    for prov, val in six.iteritems(mapped_providers):
        # NOTE(review): only the first driver under each alias is
        # inspected here -- confirm aliases are single-driver at this
        # point.
        prov_name = next(iter(val))
        for node in mapped_providers[prov][prov_name]:
            vms[node] = mapped_providers[prov][prov_name][node]
            vms[node]['provider'] = prov
            vms[node]['driver'] = prov_name
    alias, driver = profile_details['provider'].split(':')
    provider_details = self.opts['providers'][alias][driver].copy()
    del provider_details['profiles']
    for name in names:
        if name in vms:
            # Refuse to clobber an existing machine with the same name.
            prov = vms[name]['provider']
            driv = vms[name]['driver']
            msg = '{0} already exists under {1}:{2}'.format(
                name, prov, driv
            )
            log.error(msg)
            ret[name] = {'Error': msg}
            continue
        vm_ = self.vm_config(
            name,
            main_cloud_config,
            provider_details,
            profile_details,
            vm_overrides,
        )
        if self.opts['parallel']:
            process = multiprocessing.Process(
                target=self.create,
                args=(vm_,)
            )
            process.start()
            ret[name] = {
                'Provisioning': 'VM being provisioned in parallel. '
                                'PID: {0}'.format(process.pid)
            }
            continue
        try:
            # No need to inject __active_provider_name__ into the context
            # here because self.create takes care of that
            ret[name] = self.create(vm_)
            if not ret[name]:
                ret[name] = {'Error': 'Failed to deploy VM'}
                if len(names) == 1:
                    raise SaltCloudSystemExit('Failed to deploy VM')
                continue
            if self.opts.get('show_deploy_args', False) is False:
                ret[name].pop('deploy_kwargs', None)
        except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
            # With a single name, propagate; otherwise record and go on.
            if len(names) == 1:
                raise
            ret[name] = {'Error': str(exc)}
    return ret
def do_action(self, names, kwargs):
    '''
    Perform an action on a VM which may be specific to this cloud provider

    names -- iterable of VM names (or driver-reported ids) to act on.
    kwargs -- extra keyword data forwarded to the driver action; any
        '__pub_*' keys are stripped before the call.

    The action name itself is taken from ``self.opts['action']``.
    Returns ``{alias: {driver: {vm_name: result}}}`` plus bookkeeping
    keys ('Invalid Actions', 'Not Found', 'Not Actioned/Not Running').
    '''
    ret = {}
    invalid_functions = {}
    names = set(names)
    for alias, drivers in six.iteritems(self.map_providers_parallel()):
        if not names:
            # Everything has been handled; stop scanning providers.
            break
        for driver, vms in six.iteritems(drivers):
            if not names:
                break
            valid_function = True
            fun = '{0}.{1}'.format(driver, self.opts['action'])
            if fun not in self.clouds:
                log.info('\'%s()\' is not available. Not actioning...', fun)
                valid_function = False
            for vm_name, vm_details in six.iteritems(vms):
                if not names:
                    break
                if vm_name not in names:
                    if not isinstance(vm_details, dict):
                        vm_details = {}
                    # Allow addressing a VM by its driver-side id too.
                    if 'id' in vm_details and vm_details['id'] in names:
                        vm_name = vm_details['id']
                    else:
                        log.debug(
                            'vm:%s in provider:%s is not in name '
                            'list:\'%s\'', vm_name, driver, names
                        )
                        continue
                # Build the dictionary of invalid functions with their associated VMs.
                if valid_function is False:
                    if invalid_functions.get(fun) is None:
                        invalid_functions.update({fun: []})
                    invalid_functions[fun].append(vm_name)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if alias not in ret:
                        ret[alias] = {}
                    if driver not in ret[alias]:
                        ret[alias][driver] = {}
                    # Clean kwargs of "__pub_*" data before running the cloud action call.
                    # Prevents calling positional "kwarg" arg before "call" when no kwarg
                    # argument is present in the cloud driver function's arg spec.
                    kwargs = salt.utils.args.clean_kwargs(**kwargs)
                    if kwargs:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, kwargs, call='action'
                        )
                    else:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, call='action'
                        )
                    names.remove(vm_name)
    # Set the return information for the VMs listed in the invalid_functions dict.
    missing_vms = set()
    if invalid_functions:
        ret['Invalid Actions'] = invalid_functions
        invalid_func_vms = set()
        for key, val in six.iteritems(invalid_functions):
            invalid_func_vms = invalid_func_vms.union(set(val))
        # Find the VMs that are in names, but not in set of invalid functions.
        missing_vms = names.difference(invalid_func_vms)
        if missing_vms:
            ret['Not Found'] = list(missing_vms)
            ret['Not Actioned/Not Running'] = list(names)
    if not names:
        return ret
    # Don't return missing VM information for invalid functions until after we've had a
    # Chance to return successful actions. If a function is valid for one driver, but
    # Not another, we want to make sure the successful action is returned properly.
    if missing_vms:
        return ret
    # If we reach this point, the Not Actioned and Not Found lists will be the same,
    # But we want to list both for clarity/consistency with the invalid functions lists.
    ret['Not Actioned/Not Running'] = list(names)
    ret['Not Found'] = list(names)
    return ret
def do_function(self, prov, func, kwargs):
    '''
    Perform a function against a cloud provider

    prov -- provider selector (alias, driver, or 'alias:driver');
        must resolve to exactly one configured provider.
    func -- name of the driver-level function to call (without the
        driver prefix).
    kwargs -- optional keyword data passed to the function.

    Returns ``{alias: {driver: result}}``.
    Raises ``SaltCloudSystemExit`` on an ambiguous provider match or
    an unknown function.
    '''
    matches = self.lookup_providers(prov)
    if len(matches) > 1:
        raise SaltCloudSystemExit(
            'More than one results matched \'{0}\'. Please specify '
            'one of: {1}'.format(
                prov,
                ', '.join([
                    '{0}:{1}'.format(alias, driver) for
                    (alias, driver) in matches
                ])
            )
        )
    alias, driver = matches.pop()
    fun = '{0}.{1}'.format(driver, func)
    if fun not in self.clouds:
        raise SaltCloudSystemExit(
            'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
            'not define the function \'{2}\''.format(alias, driver, func)
        )
    log.debug(
        'Trying to execute \'%s\' with the following kwargs: %s',
        fun, kwargs
    )
    with salt.utils.context.func_globals_inject(
        self.clouds[fun],
        __active_provider_name__=':'.join([alias, driver])
    ):
        if kwargs:
            return {
                alias: {
                    driver: self.clouds[fun](
                        call='function', kwargs=kwargs
                    )
                }
            }
        return {
            alias: {
                driver: self.clouds[fun](call='function')
            }
        }
def __filter_non_working_providers(self):
    '''
    Remove any mis-configured cloud providers from the available listing

    Mutates ``self.opts['providers']`` in place: drops drivers whose
    module failed to load (no ``get_configured_provider`` function) or
    whose ``get_configured_provider()`` returns False, then drops any
    alias left with no drivers.  Iterates over copies so removal during
    iteration is safe.
    '''
    for alias, drivers in six.iteritems(self.opts['providers'].copy()):
        for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:
                # Mis-configured provider that got removed?
                log.warning(
                    'The cloud driver, \'%s\', configured under the '
                    '\'%s\' cloud provider alias, could not be loaded. '
                    'Please check your provider configuration files and '
                    'ensure all required dependencies are installed '
                    'for the \'%s\' driver.\n'
                    'In rare cases, this could indicate the \'%s()\' '
                    'function could not be found.\nRemoving \'%s\' from '
                    'the available providers list',
                    driver, alias, driver, fun, driver
                )
                self.opts['providers'][alias].pop(driver)
                if alias not in self.opts['providers']:
                    continue
                if not self.opts['providers'][alias]:
                    # Last driver under this alias is gone; drop the alias.
                    self.opts['providers'].pop(alias)
                continue
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                if self.clouds[fun]() is False:
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias is not properly '
                        'configured. Removing it from the available '
                        'providers list.', driver, alias
                    )
                    self.opts['providers'][alias].pop(driver)
        if alias not in self.opts['providers']:
            continue
        if not self.opts['providers'][alias]:
            # Alias emptied by the loop above; drop it entirely.
            self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.create_all
|
python
|
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
|
Create/Verify the VMs in the VM data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L945-L956
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def create(self, vm_, local_master=True):\n '''\n Create a single VM\n '''\n output = {}\n\n minion_dict = salt.config.get_cloud_config_value(\n 'minion', vm_, self.opts, default={}\n )\n\n alias, driver = vm_['provider'].split(':')\n fun = '{0}.create'.format(driver)\n if fun not in self.clouds:\n log.error(\n 'Creating \\'%s\\' using \\'%s\\' as the provider '\n 'cannot complete since \\'%s\\' is not available',\n vm_['name'], vm_['provider'], driver\n )\n return\n\n deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)\n make_master = salt.config.get_cloud_config_value(\n 'make_master',\n vm_,\n self.opts\n )\n\n if deploy:\n if not make_master and 'master' not in minion_dict:\n log.warning(\n 'There\\'s no master defined on the \\'%s\\' VM settings.',\n vm_['name']\n )\n\n if 'pub_key' not in vm_ and 'priv_key' not in vm_:\n log.debug('Generating minion keys for \\'%s\\'', vm_['name'])\n priv, pub = salt.utils.cloud.gen_keys(\n salt.config.get_cloud_config_value(\n 'keysize',\n vm_,\n self.opts\n )\n )\n vm_['pub_key'] = pub\n vm_['priv_key'] = priv\n else:\n # Note(pabelanger): We still reference pub_key and priv_key when\n # deploy is disabled.\n vm_['pub_key'] = None\n vm_['priv_key'] = None\n\n key_id = minion_dict.get('id', vm_['name'])\n\n domain = vm_.get('domain')\n if vm_.get('use_fqdn') and domain:\n minion_dict['append_domain'] = domain\n\n if 'append_domain' in minion_dict:\n key_id = '.'.join([key_id, minion_dict['append_domain']])\n\n if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:\n log.debug('Generating the master keys for \\'%s\\'', vm_['name'])\n master_priv, master_pub = salt.utils.cloud.gen_keys(\n salt.config.get_cloud_config_value(\n 'keysize',\n vm_,\n self.opts\n )\n )\n vm_['master_pub'] = master_pub\n vm_['master_pem'] = master_priv\n\n if local_master is True and deploy is True:\n # Accept the key on the local master\n salt.utils.cloud.accept_key(\n self.opts['pki_dir'], vm_['pub_key'], 
key_id\n )\n\n vm_['os'] = salt.config.get_cloud_config_value(\n 'script',\n vm_,\n self.opts\n )\n\n try:\n vm_['inline_script'] = salt.config.get_cloud_config_value(\n 'inline_script',\n vm_,\n self.opts\n )\n except KeyError:\n pass\n\n try:\n alias, driver = vm_['provider'].split(':')\n func = '{0}.create'.format(driver)\n with salt.utils.context.func_globals_inject(\n self.clouds[fun],\n __active_provider_name__=':'.join([alias, driver])\n ):\n output = self.clouds[func](vm_)\n if output is not False and 'sync_after_install' in self.opts:\n if self.opts['sync_after_install'] not in (\n 'all', 'modules', 'states', 'grains'):\n log.error('Bad option for sync_after_install')\n return output\n\n # A small pause helps the sync work more reliably\n time.sleep(3)\n\n start = int(time.time())\n while int(time.time()) < start + 60:\n # We'll try every <timeout> seconds, up to a minute\n mopts_ = salt.config.DEFAULT_MASTER_OPTS\n conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])\n mopts_.update(\n salt.config.master_config(\n os.path.join(conf_path,\n 'master')\n )\n )\n\n client = salt.client.get_local_client(mopts=mopts_)\n\n ret = client.cmd(\n vm_['name'],\n 'saltutil.sync_{0}'.format(self.opts['sync_after_install']),\n timeout=self.opts['timeout']\n )\n if ret:\n log.info(\n six.u('Synchronized the following dynamic modules: '\n ' {0}').format(ret)\n )\n break\n except KeyError as exc:\n log.exception(\n 'Failed to create VM %s. 
Configuration value %s needs '\n 'to be set', vm_['name'], exc\n )\n # If it's a map then we need to respect the 'requires'\n # so we do it later\n try:\n opt_map = self.opts['map']\n except KeyError:\n opt_map = False\n if self.opts['parallel'] and self.opts['start_action'] and not opt_map:\n log.info('Running %s on %s', self.opts['start_action'], vm_['name'])\n client = salt.client.get_local_client(mopts=self.opts)\n action_out = client.cmd(\n vm_['name'],\n self.opts['start_action'],\n timeout=self.opts['timeout'] * 60\n )\n output['ret'] = action_out\n return output\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
    '''
    opts -- the salt-cloud configuration options dict; kept by
        reference and consulted (and, for providers, mutated) by the
        other methods.
    '''
    self.opts = opts
    self.client = CloudClient(opts=self.opts)
    # Loaded cloud driver functions, keyed as '<driver>.<function>'.
    self.clouds = salt.loader.clouds(self.opts)
    # Prune providers whose driver is missing or mis-configured so the
    # rest of the class can assume every remaining entry is usable.
    self.__filter_non_working_providers()
    # Cache of provider query results, keyed by query name.
    self.__cached_provider_queries = {}
def get_configured_providers(self):
    '''
    Return the configured providers

    Aliases with a single driver are returned bare; aliases with
    several drivers are expanded to '<alias>:<driver>' entries.
    '''
    configured = set()
    for alias, drivers in six.iteritems(self.opts['providers']):
        if len(drivers) > 1:
            # Ambiguous alias: qualify every driver explicitly.
            configured.update(
                '{0}:{1}'.format(alias, driver) for driver in drivers
            )
        else:
            configured.add(alias)
    return configured
def lookup_providers(self, lookup):
    '''
    Get a dict describing the configured providers

    (The actual return value is a set of ``(alias, driver)`` tuples.)

    lookup -- ``None``/'all' for everything, 'alias:driver' for one
        entry, or a bare alias/driver name.
    Raises ``SaltCloudSystemExit`` when nothing matches.
    '''
    if lookup is None:
        lookup = 'all'
    if lookup == 'all':
        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                providers.add((alias, driver))
        if not providers:
            raise SaltCloudSystemExit(
                'There are no cloud providers configured.'
            )
        return providers
    if ':' in lookup:
        alias, driver = lookup.split(':')
        if alias not in self.opts['providers'] or \
                driver not in self.opts['providers'][alias]:
            raise SaltCloudSystemExit(
                'No cloud providers matched \'{0}\'. Available: {1}'.format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
    # NOTE(review): a *valid* 'alias:driver' lookup falls through to
    # this loop, where `lookup in (alias, driver)` can never be true
    # for a string containing ':', so it raises below.  Looks like a
    # missing `return {(alias, driver)}` after the validation above --
    # confirm intended behavior.
    providers = set()
    for alias, drivers in six.iteritems(self.opts['providers']):
        for driver in drivers:
            if lookup in (alias, driver):
                providers.add((alias, driver))
    if not providers:
        raise SaltCloudSystemExit(
            'No cloud providers matched \'{0}\'. '
            'Available selections: {1}'.format(
                lookup, ', '.join(self.get_configured_providers())
            )
        )
    return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return a dictionary describing the configured profiles

        Returns a set of ``(profile_name, provider_name)`` pairs.  When
        ``provider`` is ``'all'`` (or None) every profile is returned;
        otherwise only profiles attached to that provider alias or driver
        name.

        NOTE(review): profile 'provider' values are assumed to be
        normalized to '<alias>:<driver>' (the [1] index below would raise
        IndexError otherwise) -- confirm against config normalization.
        NOTE(review): when ``lookup`` is anything other than 'all', this
        method falls off the end and implicitly returns None; callers
        appear to only pass 'all'.
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # Split '<alias>:<driver>' into the alias given in the
                    # profile and the salt driver name
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        The result is shaped ``{alias: {driver: <query result>}}``.  When
        ``cached`` is True and this ``query`` has already been run, the
        memoized result is returned instead of re-querying the clouds.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Expose which provider entry is being queried to the
                    # driver for the duration of the call
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        Same as map_providers but query in parallel.

        Each (alias, driver) pair is queried in a worker process; results
        are merged into ``{alias: {driver: details}}`` and memoized under
        ``query``.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        opts = self.opts.copy()
        multiprocessing_data = []
        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'
                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output
        # Worker pool is capped at 10 processes regardless of how many
        # provider queries are queued
        data_count = len(multiprocessing_data)
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details
        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the queried VMs whose names appear in ``names``, shaped as
        ``{alias: {driver: {vm_name: details}}}``.

        ``names`` may be a single name or an iterable of names.  When
        ``profile`` is given, only providers whose alias matches the
        profile's provider alias are considered.
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers
'''
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup='all'):
'''
Return a mapping of all location data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_locations'.format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the locations information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def size_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        Matches ``names`` against all providers, destroys the matches
        (in parallel when the 'parallel' option is set), then removes
        accepted minion keys belonging to destroyed machines — prompting
        interactively when several globbed key files match one name.
        Returns ``{alias: {driver: {name: result}}}``, with a
        'Not Found' entry for names that matched no running VM.
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })
        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)
            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)
            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)
            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)
        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)
        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                continue
            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            globbed_key_file = glob.glob('{0}.*'.format(key_file))
            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue
            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue
            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue
            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )
                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue
                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break
                print('Did not delete \'{0}\''.format(filename))
                break
        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )
        elif names and processed:
            processed['Not Found'] = names
        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')
        return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
    def create(self, vm_, local_master=True):
        '''
        Create a single VM

        ``vm_`` is the fully merged VM configuration (see vm_config()).
        Handles minion/master key generation, optional key acceptance on
        the local master, dispatch to the driver's ``create`` function,
        an optional post-install module sync ('sync_after_install') and
        an optional 'start_action' command.  Returns the driver's output
        dict (possibly augmented with the action result under 'ret'), or
        None when the driver does not implement ``create``.
        '''
        output = {}
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        alias, driver = vm_['provider'].split(':')
        fun = '{0}.create'.format(driver)
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                vm_['name'], vm_['provider'], driver
            )
            return
        deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
        make_master = salt.config.get_cloud_config_value(
            'make_master',
            vm_,
            self.opts
        )
        if deploy:
            if not make_master and 'master' not in minion_dict:
                log.warning(
                    'There\'s no master defined on the \'%s\' VM settings.',
                    vm_['name']
                )
            if 'pub_key' not in vm_ and 'priv_key' not in vm_:
                log.debug('Generating minion keys for \'%s\'', vm_['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        vm_,
                        self.opts
                    )
                )
                vm_['pub_key'] = pub
                vm_['priv_key'] = priv
        else:
            # Note(pabelanger): We still reference pub_key and priv_key when
            # deploy is disabled.
            vm_['pub_key'] = None
            vm_['priv_key'] = None
        # The minion key id may carry an appended domain (use_fqdn)
        key_id = minion_dict.get('id', vm_['name'])
        domain = vm_.get('domain')
        if vm_.get('use_fqdn') and domain:
            minion_dict['append_domain'] = domain
        if 'append_domain' in minion_dict:
            key_id = '.'.join([key_id, minion_dict['append_domain']])
        if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
            log.debug('Generating the master keys for \'%s\'', vm_['name'])
            master_priv, master_pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['master_pub'] = master_pub
            vm_['master_pem'] = master_priv
        if local_master is True and deploy is True:
            # Accept the key on the local master
            salt.utils.cloud.accept_key(
                self.opts['pki_dir'], vm_['pub_key'], key_id
            )
        vm_['os'] = salt.config.get_cloud_config_value(
            'script',
            vm_,
            self.opts
        )
        try:
            vm_['inline_script'] = salt.config.get_cloud_config_value(
                'inline_script',
                vm_,
                self.opts
            )
        except KeyError:
            pass
        try:
            alias, driver = vm_['provider'].split(':')
            func = '{0}.create'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                output = self.clouds[func](vm_)
            if output is not False and 'sync_after_install' in self.opts:
                if self.opts['sync_after_install'] not in (
                        'all', 'modules', 'states', 'grains'):
                    log.error('Bad option for sync_after_install')
                    return output
                # A small pause helps the sync work more reliably
                time.sleep(3)
                start = int(time.time())
                while int(time.time()) < start + 60:
                    # We'll try every <timeout> seconds, up to a minute
                    mopts_ = salt.config.DEFAULT_MASTER_OPTS
                    conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                    mopts_.update(
                        salt.config.master_config(
                            os.path.join(conf_path,
                                         'master')
                        )
                    )
                    client = salt.client.get_local_client(mopts=mopts_)
                    ret = client.cmd(
                        vm_['name'],
                        'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                        timeout=self.opts['timeout']
                    )
                    if ret:
                        log.info(
                            six.u('Synchronized the following dynamic modules: '
                                  ' {0}').format(ret)
                        )
                        break
        except KeyError as exc:
            log.exception(
                'Failed to create VM %s. Configuration value %s needs '
                'to be set', vm_['name'], exc
            )
        # If it's a map then we need to respect the 'requires'
        # so we do it later
        try:
            opt_map = self.opts['map']
        except KeyError:
            opt_map = False
        if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
            log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
            client = salt.client.get_local_client(mopts=self.opts)
            action_out = client.cmd(
                vm_['name'],
                self.opts['start_action'],
                timeout=self.opts['timeout'] * 60
            )
            output['ret'] = action_out
        return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        Creates one VM per entry in ``names`` from ``profile``, merging
        the main cloud config, provider config, profile config and
        ``vm_overrides``.  Names that already exist are reported as
        errors rather than recreated.  Returns ``{name: result}`` where
        result is the create output, an {'Error': ...} dict, or a
        'Provisioning' notice in parallel mode.
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}
        ret = {}
        if not vm_overrides:
            vm_overrides = {}
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}
        if main_cloud_config is None:
            main_cloud_config = {}
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        # Flatten the provider map into {vm_name: details} to detect
        # already-existing machines
        vms = {}
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue
            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue
            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}
        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        The action name comes from ``self.opts['action']`` and is invoked
        with ``call='action'`` on every matched VM.  Returns
        ``{alias: {driver: {vm: result}}}`` plus bookkeeping entries
        ('Invalid Actions', 'Not Found', 'Not Actioned/Not Running') for
        unmatched names or drivers lacking the action.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)
        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue
                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}
                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)
                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)
        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))
            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)
            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)
        if not names:
            return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one results matched \'{0}\'. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
'not define the function \'{2}\''.format(alias, driver, func)
)
log.debug(
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
}
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        A provider entry is dropped when its driver failed to load (no
        ``get_configured_provider`` function in ``self.clouds``) or when
        that function returns False.  Aliases left with no drivers are
        removed entirely.  Mutates ``self.opts['providers']`` in place.
        '''
        # Iterate over copies so entries can be popped while looping
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    if alias not in self.opts['providers']:
                        continue
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.destroy
|
python
|
def destroy(self, names, cached=False):
'''
Destroy the named VMs
'''
processed = {}
names = set(names)
matching = self.get_running_by_names(names, cached=cached)
vms_to_destroy = set()
parallel_data = []
for alias, drivers in six.iteritems(matching):
for driver, vms in six.iteritems(drivers):
for name in vms:
if name in names:
vms_to_destroy.add((alias, driver, name))
if self.opts['parallel']:
parallel_data.append({
'opts': self.opts,
'name': name,
'alias': alias,
'driver': driver,
})
# destroying in parallel
if self.opts['parallel'] and parallel_data:
# set the pool size based on configuration or default to
# the number of machines we're destroying
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Destroying in parallel mode; '
'Cloud pool size: %s', pool_size)
# kick off the parallel destroy
output_multip = enter_mainloop(
_destroy_multiprocessing, parallel_data, pool_size=pool_size)
# massage the multiprocessing output a bit
ret_multip = {}
for obj in output_multip:
ret_multip.update(obj)
# build up a data structure similar to what the non-parallel
# destroy uses
for obj in parallel_data:
alias = obj['alias']
driver = obj['driver']
name = obj['name']
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret_multip[name]
if name in names:
names.remove(name)
# not destroying in parallel
else:
log.info('Destroying in non-parallel mode.')
for alias, driver, name in vms_to_destroy:
fun = '{0}.destroy'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
ret = self.clouds[fun](name)
if alias not in processed:
processed[alias] = {}
if driver not in processed[alias]:
processed[alias][driver] = {}
processed[alias][driver][name] = ret
if name in names:
names.remove(name)
# now the processed data structure contains the output from either
# the parallel or non-parallel destroy and we should finish up
# with removing minion keys if necessary
for alias, driver, name in vms_to_destroy:
ret = processed[alias][driver][name]
if not ret:
continue
vm_ = {
'name': name,
'profile': None,
'provider': ':'.join([alias, driver]),
'driver': driver
}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
key_file = os.path.join(
self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
)
globbed_key_file = glob.glob('{0}.*'.format(key_file))
if not os.path.isfile(key_file) and not globbed_key_file:
# There's no such key file!? It might have been renamed
if isinstance(ret, dict) and 'newname' in ret:
salt.utils.cloud.remove_key(
self.opts['pki_dir'], ret['newname']
)
continue
if os.path.isfile(key_file) and not globbed_key_file:
# Single key entry. Remove it!
salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
continue
# Since we have globbed matches, there are probably some keys for which their minion
# configuration has append_domain set.
if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
# Single entry, let's remove it!
salt.utils.cloud.remove_key(
self.opts['pki_dir'],
os.path.basename(globbed_key_file[0])
)
continue
# Since we can't get the profile or map entry used to create
# the VM, we can't also get the append_domain setting.
# And if we reached this point, we have several minion keys
# who's name starts with the machine name we're deleting.
# We need to ask one by one!?
print(
'There are several minion keys who\'s name starts '
'with \'{0}\'. We need to ask you which one should be '
'deleted:'.format(
name
)
)
while True:
for idx, filename in enumerate(globbed_key_file):
print(' {0}: {1}'.format(
idx, os.path.basename(filename)
))
selection = input(
'Which minion key should be deleted(number)? '
)
try:
selection = int(selection)
except ValueError:
print(
'\'{0}\' is not a valid selection.'.format(selection)
)
try:
filename = os.path.basename(
globbed_key_file.pop(selection)
)
except Exception:
continue
delete = input(
'Delete \'{0}\'? [Y/n]? '.format(filename)
)
if delete == '' or delete.lower().startswith('y'):
salt.utils.cloud.remove_key(
self.opts['pki_dir'], filename
)
print('Deleted \'{0}\''.format(filename))
break
print('Did not delete \'{0}\''.format(filename))
break
if names and not processed:
# These machines were asked to be destroyed but could not be found
raise SaltCloudSystemExit(
'The following VM\'s were not found: {0}'.format(
', '.join(names)
)
)
elif names and processed:
processed['Not Found'] = names
elif not processed:
raise SaltCloudSystemExit('No machines were destroyed!')
return processed
|
Destroy the named VMs
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L958-L1138
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def enter_mainloop(target,\n mapped_args=None,\n args=None,\n kwargs=None,\n pool=None,\n pool_size=None,\n callback=None,\n queue=None):\n '''\n Manage a multiprocessing pool\n\n - If the queue does not output anything, the pool runs indefinitely\n\n - If the queue returns KEYBOARDINT or ERROR, this will kill the pool\n totally calling terminate & join and ands with a SaltCloudSystemExit\n exception notifying callers from the abnormal termination\n\n - If the queue returns END or callback is defined and returns True,\n it just join the process and return the data.\n\n target\n the function you want to execute in multiproccessing\n pool\n pool object can be None if you want a default pool, but you ll\n have then to define pool_size instead\n pool_size\n pool size if you did not provide yourself a pool\n callback\n a boolean taking a string in argument which returns True to\n signal that 'target' is finished and we need to join\n the pool\n queue\n A custom multiproccessing queue in case you want to do\n extra stuff and need it later in your program\n args\n positional arguments to call the function with\n if you don't want to use pool.map\n\n mapped_args\n a list of one or more arguments combinations to call the function with\n e.g. 
(foo, [[1], [2]]) will call::\n\n foo([1])\n foo([2])\n\n kwargs\n kwargs to give to the function in case of process\n\n Attention, the function must have the following signature:\n\n target(queue, *args, **kw)\n\n You may use the 'communicator' decorator to generate such a function\n (see end of this file)\n '''\n if not kwargs:\n kwargs = {}\n if not pool_size:\n pool_size = 1\n if not pool:\n pool = multiprocessing.Pool(pool_size)\n if not queue:\n manager = multiprocessing.Manager()\n queue = manager.Queue()\n\n if mapped_args is not None and not mapped_args:\n msg = (\n 'We are called to asynchronously execute {0}'\n ' but we do no have anything to execute, weird,'\n ' we bail out'.format(target))\n log.error(msg)\n raise SaltCloudSystemExit('Exception caught\\n{0}'.format(msg))\n elif mapped_args is not None:\n iterable = [[queue, [arg], kwargs] for arg in mapped_args]\n ret = pool.map(func=target, iterable=iterable)\n else:\n ret = pool.apply(target, [queue, args, kwargs])\n while True:\n test = queue.get()\n if test in ['ERROR', 'KEYBOARDINT']:\n type_ = queue.get()\n trace = queue.get()\n msg = 'Caught {0}, terminating workers\\n'.format(type_)\n msg += 'TRACE: {0}\\n'.format(trace)\n log.error(msg)\n pool.terminate()\n pool.join()\n raise SaltCloudSystemExit('Exception caught\\n{0}'.format(msg))\n elif test in ['END'] or (callback and callback(test)):\n pool.close()\n pool.join()\n break\n else:\n time.sleep(0.125)\n return ret\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def remove_key(pki_dir, id_):\n '''\n This method removes a specified key from the accepted keys dir\n '''\n key = os.path.join(pki_dir, 'minions', id_)\n if os.path.isfile(key):\n os.remove(key)\n log.debug('Deleted \\'%s\\'', key)\n",
"def get_running_by_names(self, names, query='list_nodes', cached=False,\n profile=None):\n if isinstance(names, six.string_types):\n names = [names]\n\n matches = {}\n handled_drivers = {}\n mapped_providers = self.map_providers_parallel(query, cached=cached)\n for alias, drivers in six.iteritems(mapped_providers):\n for driver, vms in six.iteritems(drivers):\n if driver not in handled_drivers:\n handled_drivers[driver] = alias\n # When a profile is specified, only return an instance\n # that matches the provider specified in the profile.\n # This solves the issues when many providers return the\n # same instance. For example there may be one provider for\n # each availability zone in amazon in the same region, but\n # the search returns the same instance for each provider\n # because amazon returns all instances in a region, not\n # availability zone.\n if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:\n continue\n\n for vm_name, details in six.iteritems(vms):\n # XXX: The logic below can be removed once the aws driver\n # is removed\n if vm_name not in names:\n continue\n\n elif driver == 'ec2' and 'aws' in handled_drivers and \\\n 'aws' in matches[handled_drivers['aws']] and \\\n vm_name in matches[handled_drivers['aws']]['aws']:\n continue\n elif driver == 'aws' and 'ec2' in handled_drivers and \\\n 'ec2' in matches[handled_drivers['ec2']] and \\\n vm_name in matches[handled_drivers['ec2']]['ec2']:\n continue\n\n if alias not in matches:\n matches[alias] = {}\n if driver not in matches[alias]:\n matches[alias][driver] = {}\n matches[alias][driver][vm_name] = details\n\n return matches\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
    def __init__(self, opts):
        '''
        Build a Cloud controller from a loaded salt-cloud configuration.

        opts
            The salt-cloud configuration dictionary (providers, profiles,
            timeouts, ...).
        '''
        self.opts = opts
        self.client = CloudClient(opts=self.opts)
        # Mapping of '<driver>.<function>' -> callable, loaded from the
        # available cloud driver modules.
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver failed to load or whose configuration
        # check reports them as unusable.
        self.__filter_non_working_providers()
        # Cache of provider query results keyed by query name; populated by
        # map_providers()/map_providers_parallel() and reused when
        # cached=True is passed.
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        query
            Name of the driver function to call per provider
            (default ``list_nodes``).
        cached
            When True, reuse a previously cached result for this query
            instead of contacting the providers again.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Make the driver aware of which provider alias it is
                    # being run for while the query executes.
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs
        Same as map_providers but query in parallel.

        query
            Name of the driver function to call per provider
            (default ``list_nodes``).
        cached
            When True, reuse a previously cached result for this query.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        opts = self.opts.copy()
        multiprocessing_data = []
        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'
                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output
        data_count = len(multiprocessing_data)
        # Cap the pool at 10 workers, one per pending provider query below
        # that limit.
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details
        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of running instances whose names (or 'id' values)
        appear in ``names``, as a nested ``{alias: {driver: {name: details}}}``
        mapping.

        names
            A VM name or list of VM names to look for.
        query
            Driver query function passed through to
            ``map_providers_parallel`` (default ``list_nodes``).
        cached
            Reuse cached provider query results when True.
        profile
            When given, only consider providers matching the profile's
            configured provider alias.
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers
'''
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
    def location_list(self, lookup='all'):
        '''
        Return a mapping of all location data for available providers

        lookup
            Provider selection passed to ``lookup_providers``
            (default 'all').
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_locations'.format(driver)
            if fun not in self.clouds:
                # The capability to gather locations is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the locations information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                # Expose the active provider alias to the driver call
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
    def size_list(self, lookup='all'):
        '''
        Return a mapping of all size data for available providers

        lookup
            Provider selection passed to ``lookup_providers``
            (default 'all').
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_sizes'.format(driver)
            if fun not in self.clouds:
                # The capability to gather sizes is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the sizes information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                # Expose the active provider alias to the driver call
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
    def create(self, vm_, local_master=True):
        '''
        Create a single VM

        vm_
            The fully merged VM configuration dict; must contain at least
            'name' and 'provider' (as '<alias>:<driver>').
        local_master
            When True and deployment is enabled, pre-accept the generated
            minion key on this master's pki_dir.

        Returns the driver's creation output, or None when the driver's
        ``create`` function is unavailable.
        '''
        output = {}
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        alias, driver = vm_['provider'].split(':')
        fun = '{0}.create'.format(driver)
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                vm_['name'], vm_['provider'], driver
            )
            return
        deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
        make_master = salt.config.get_cloud_config_value(
            'make_master',
            vm_,
            self.opts
        )
        if deploy:
            if not make_master and 'master' not in minion_dict:
                log.warning(
                    'There\'s no master defined on the \'%s\' VM settings.',
                    vm_['name']
                )
            # Generate a minion keypair unless one was supplied in the config
            if 'pub_key' not in vm_ and 'priv_key' not in vm_:
                log.debug('Generating minion keys for \'%s\'', vm_['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        vm_,
                        self.opts
                    )
                )
                vm_['pub_key'] = pub
                vm_['priv_key'] = priv
        else:
            # Note(pabelanger): We still reference pub_key and priv_key when
            # deploy is disabled.
            vm_['pub_key'] = None
            vm_['priv_key'] = None
        # The key id defaults to the VM name, optionally suffixed with the
        # minion's append_domain (derived from 'domain' when use_fqdn is set).
        key_id = minion_dict.get('id', vm_['name'])
        domain = vm_.get('domain')
        if vm_.get('use_fqdn') and domain:
            minion_dict['append_domain'] = domain
        if 'append_domain' in minion_dict:
            key_id = '.'.join([key_id, minion_dict['append_domain']])
        if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
            log.debug('Generating the master keys for \'%s\'', vm_['name'])
            master_priv, master_pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['master_pub'] = master_pub
            vm_['master_pem'] = master_priv
        if local_master is True and deploy is True:
            # Accept the key on the local master
            salt.utils.cloud.accept_key(
                self.opts['pki_dir'], vm_['pub_key'], key_id
            )
        vm_['os'] = salt.config.get_cloud_config_value(
            'script',
            vm_,
            self.opts
        )
        try:
            vm_['inline_script'] = salt.config.get_cloud_config_value(
                'inline_script',
                vm_,
                self.opts
            )
        except KeyError:
            pass
        try:
            # 'func' is the same '<driver>.create' string as 'fun' above,
            # recomputed after re-splitting the provider.
            alias, driver = vm_['provider'].split(':')
            func = '{0}.create'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                output = self.clouds[func](vm_)
            if output is not False and 'sync_after_install' in self.opts:
                if self.opts['sync_after_install'] not in (
                        'all', 'modules', 'states', 'grains'):
                    log.error('Bad option for sync_after_install')
                    return output
                # A small pause helps the sync work more reliably
                time.sleep(3)
                start = int(time.time())
                while int(time.time()) < start + 60:
                    # We'll try every <timeout> seconds, up to a minute
                    mopts_ = salt.config.DEFAULT_MASTER_OPTS
                    conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                    mopts_.update(
                        salt.config.master_config(
                            os.path.join(conf_path,
                                         'master')
                        )
                    )
                    client = salt.client.get_local_client(mopts=mopts_)
                    ret = client.cmd(
                        vm_['name'],
                        'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                        timeout=self.opts['timeout']
                    )
                    if ret:
                        log.info(
                            six.u('Synchronized the following dynamic modules: '
                                  ' {0}').format(ret)
                        )
                        break
        except KeyError as exc:
            log.exception(
                'Failed to create VM %s. Configuration value %s needs '
                'to be set', vm_['name'], exc
            )
        # If it's a map then we need to respect the 'requires'
        # so we do it later
        try:
            opt_map = self.opts['map']
        except KeyError:
            opt_map = False
        if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
            log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
            client = salt.client.get_local_client(mopts=self.opts)
            action_out = client.cmd(
                vm_['name'],
                self.opts['start_action'],
                timeout=self.opts['timeout'] * 60
            )
            output['ret'] = action_out
        return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        profile
            Name of the profile (must exist in opts['profiles']).
        names
            Iterable of VM names to create from the profile.
        vm_overrides
            Optional per-VM configuration overrides applied on top of the
            merged main/provider/profile configuration.

        Returns a dict mapping each VM name to its creation result (or an
        {'Error': ...} entry).
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}
        ret = {}
        if not vm_overrides:
            vm_overrides = {}
        # Re-read the main cloud config file so it can be layered under the
        # provider/profile data; missing/empty files fall back to {}.
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}
        if main_cloud_config is None:
            main_cloud_config = {}
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        # Flatten the provider map into {vm_name: details} to detect
        # already-existing instances below.
        vms = {}
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue
            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue
            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                # With a single requested VM, propagate the failure;
                # otherwise record it and keep creating the rest.
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}
        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        names
            VM names (or 'id' values) the action should run against.
        kwargs
            Extra keyword arguments forwarded to the driver's action
            function; '__pub_*' entries are stripped first.

        Returns a nested ``{alias: {driver: {vm: result}}}`` dict, plus
        'Invalid Actions'/'Not Found'/'Not Actioned/Not Running' entries
        for VMs that could not be actioned.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)
        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # The key may match on the instance 'id' instead of
                        # its display name.
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue
                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}
                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)
                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)
        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))
            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)
            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)
        if not names:
            return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        prov
            Provider selector resolved through ``lookup_providers``; must
            match exactly one (alias, driver) pair.
        func
            Name of the driver function to call (invoked with
            call='function').
        kwargs
            Optional keyword arguments forwarded to the driver function.

        Returns ``{alias: {driver: result}}``; raises SaltCloudSystemExit
        on ambiguous providers or an undefined function.
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )
        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )
        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        A provider is dropped when its driver's
        ``get_configured_provider`` function is missing (driver failed to
        load) or returns False (bad configuration). Aliases left without
        any driver are removed entirely.
        '''
        # Iterate over copies since entries are popped while looping.
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    if alias not in self.opts['providers']:
                        continue
                    if not self.opts['providers'][alias]:
                        # No drivers left under this alias; drop it too.
                        self.opts['providers'].pop(alias)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                # Alias emptied by the configuration check above.
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.reboot
|
python
|
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
|
Reboot the named VMs
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1140-L1159
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def map_providers_parallel(self, query='list_nodes', cached=False):\n '''\n Return a mapping of what named VMs are running on what VM providers\n based on what providers are defined in the configuration and VMs\n\n Same as map_providers but query in parallel.\n '''\n if cached is True and query in self.__cached_provider_queries:\n return self.__cached_provider_queries[query]\n\n opts = self.opts.copy()\n multiprocessing_data = []\n\n # Optimize Providers\n opts['providers'] = self._optimize_providers(opts['providers'])\n for alias, drivers in six.iteritems(opts['providers']):\n # Make temp query for this driver to avoid overwrite next\n this_query = query\n for driver, details in six.iteritems(drivers):\n # If driver has function list_nodes_min, just replace it\n # with query param to check existing vms on this driver\n # for minimum information, Otherwise still use query param.\n if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:\n this_query = 'list_nodes_min'\n\n fun = '{0}.{1}'.format(driver, this_query)\n if fun not in self.clouds:\n log.error('Public cloud provider %s is not available', driver)\n continue\n\n multiprocessing_data.append({\n 'fun': fun,\n 'opts': opts,\n 'query': this_query,\n 'alias': alias,\n 'driver': driver\n })\n output = {}\n if not multiprocessing_data:\n return output\n\n data_count = len(multiprocessing_data)\n pool = multiprocessing.Pool(data_count < 10 and data_count or 10,\n init_pool_worker)\n parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,\n multiprocessing_data,\n pool=pool)\n for alias, driver, details in parallel_pmap:\n if not details:\n # There's no providers details?! Skip it!\n continue\n if alias not in output:\n output[alias] = {}\n output[alias][driver] = details\n\n self.__cached_provider_queries[query] = output\n return output\n"
] |
class Cloud(object):
    '''
    An object for the creation of new VMs
    '''
    def __init__(self, opts):
        # opts: the parsed salt-cloud configuration (providers, profiles, ...)
        self.opts = opts
        self.client = CloudClient(opts=self.opts)
        # Mapping of '<driver>.<function>' -> callable, built by the loader
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver cannot be loaded or is mis-configured
        self.__filter_non_working_providers()
        # Cache of provider query results, keyed by query name
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
    def lookup_providers(self, lookup):
        '''
        Get a dict describing the configured providers

        Resolves ``lookup`` into a set of ``(alias, driver)`` tuples.
        ``lookup`` may be ``None``/'all' (every configured provider),
        an '<alias>:<driver>' pair, or a bare alias/driver name.

        Raises SaltCloudSystemExit when nothing matches.
        '''
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            providers = set()
            for alias, drivers in six.iteritems(self.opts['providers']):
                for driver in drivers:
                    providers.add((alias, driver))
            if not providers:
                raise SaltCloudSystemExit(
                    'There are no cloud providers configured.'
                )
            return providers
        if ':' in lookup:
            # Explicit '<alias>:<driver>' pair; validate both parts exist
            alias, driver = lookup.split(':')
            if alias not in self.opts['providers'] or \
                    driver not in self.opts['providers'][alias]:
                raise SaltCloudSystemExit(
                    'No cloud providers matched \'{0}\'. Available: {1}'.format(
                        lookup, ', '.join(self.get_configured_providers())
                    )
                )
        # Bare name: match against both aliases and driver names.
        # NOTE(review): a *valid* '<alias>:<driver>' pair falls through to
        # this loop, where a string containing ':' can never equal a bare
        # alias or driver name — it looks like that path always raises.
        # Confirm against callers before relying on the pair form here.
        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                if lookup in (alias, driver):
                    providers.add((alias, driver))
        if not providers:
            raise SaltCloudSystemExit(
                'No cloud providers matched \'{0}\'. '
                'Available selections: {1}'.format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
        return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return a dictionary describing the configured profiles

        Resolves to a set of ``(profile_name, provider_name)`` tuples,
        optionally filtered to profiles whose provider matches
        ``provider``.  Raises SaltCloudSystemExit when no profiles are
        configured.
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # 'provider' is expected as '<alias>:<driver>'.
                    # NOTE(review): a colon-less provider string would raise
                    # IndexError on the second split — presumably configs
                    # are normalized upstream; confirm.
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        query
            Name of the driver function to invoke (default: 'list_nodes').
        cached
            When True, reuse a previously computed result for this query.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Make the active provider name visible to the driver
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        Same as map_providers but query in parallel (one pool worker per
        provider query, capped at 10 workers).
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        opts = self.opts.copy()
        multiprocessing_data = []
        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'
                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output
        data_count = len(multiprocessing_data)
        # Pool size: one worker per query, capped at 10
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details
        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of running VMs whose names appear in ``names``,
        grouped as ``{alias: {driver: {vm_name: details}}}``.

        ``names`` may be a single name or an iterable of names.  When
        ``profile`` is given, only providers matching that profile's
        provider alias are considered.
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers
'''
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup='all'):
'''
Return a mapping of all location data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_locations'.format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the locations information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def size_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        names
            Iterable of VM names to destroy.
        cached
            When True, reuse cached provider query results for the lookup.

        After destroying, the matching minion key(s) are removed from the
        local master's pki_dir (interactively when several keys match).
        Raises SaltCloudSystemExit when nothing matching was destroyed.
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })
        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)
            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)
            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)
            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)
        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)
        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                continue
            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            # Glob catches keys written with an append_domain suffix
            globbed_key_file = glob.glob('{0}.*'.format(key_file))
            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue
            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue
            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue
            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )
                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue
                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break
                print('Did not delete \'{0}\''.format(filename))
                break
        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )
        elif names and processed:
            processed['Not Found'] = names
        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')
        return processed
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
    def extras(self, extra_):
        '''
        Extra actions

        Run a driver-level "extra" action.  ``extra_`` must carry
        'provider' ('<alias>:<driver>'), 'action' and 'name' keys and is
        passed through to the driver function as keyword arguments.
        '''
        output = {}
        alias, driver = extra_['provider'].split(':')
        fun = '{0}.{1}'.format(driver, extra_['action'])
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                extra_['name'], extra_['provider'], driver
            )
            return
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=extra_['provider']
            ):
                output = self.clouds[fun](**extra_)
        except KeyError as exc:
            log.exception(
                'Failed to perform %s.%s on %s. '
                'Configuration value %s needs to be set',
                extra_['provider'], extra_['action'], extra_['name'], exc
            )
        return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        profile
            Name of the profile to use (must exist in opts['profiles']).
        names
            Iterable of VM names to create from the profile.
        vm_overrides
            Optional dict of per-VM config overrides.

        Returns a dict of ``{name: result-or-error}``.
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}
        ret = {}
        if not vm_overrides:
            vm_overrides = {}
        # Best effort: load the main cloud config file; fall back to {}
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}
        if main_cloud_config is None:
            main_cloud_config = {}
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        vms = {}
        # Flatten all currently-known nodes so existing names can be detected
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue
            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue
            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                # With a single VM re-raise; otherwise record and continue
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}
        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        The action name comes from ``self.opts['action']``; ``names`` is
        the set of target VM names and ``kwargs`` the extra arguments to
        pass to the driver's action function.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)
        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # The VM may be listed under its 'id' rather than name
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue
                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}
                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)
                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)
        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))
            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)
            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)
        if not names:
            return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        ``prov`` must resolve to exactly one configured provider (see
        lookup_providers); otherwise SaltCloudSystemExit is raised.
        Returns ``{alias: {driver: result}}``.
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )
        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )
        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        A provider is dropped when its driver failed to load (no
        get_configured_provider function) or when that function returns
        False.  Aliases left with no drivers are removed entirely.
        '''
        # Iterate over copies since entries are popped during the loop
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    if alias not in self.opts['providers']:
                        continue
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.create
|
python
|
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
|
Create a single VM
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1161-L1316
|
[
"def u(s):\n return unicode(s.replace(r'\\\\', r'\\\\\\\\'), \"unicode_escape\")\n",
"def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def gen_keys(keysize=2048):\n '''\n Generate Salt minion keys and return them as PEM file strings\n '''\n # Mandate that keys are at least 2048 in size\n if keysize < 2048:\n keysize = 2048\n tdir = tempfile.mkdtemp()\n\n salt.crypt.gen_keys(tdir, 'minion', keysize)\n priv_path = os.path.join(tdir, 'minion.pem')\n pub_path = os.path.join(tdir, 'minion.pub')\n with salt.utils.files.fopen(priv_path) as fp_:\n priv = salt.utils.stringutils.to_unicode(fp_.read())\n with salt.utils.files.fopen(pub_path) as fp_:\n pub = salt.utils.stringutils.to_unicode(fp_.read())\n shutil.rmtree(tdir)\n return priv, pub\n",
"def accept_key(pki_dir, pub, id_):\n '''\n If the master config was available then we will have a pki_dir key in\n the opts directory, this method places the pub key in the accepted\n keys dir and removes it from the unaccepted keys dir if that is the case.\n '''\n for key_dir in 'minions', 'minions_pre', 'minions_rejected':\n key_path = os.path.join(pki_dir, key_dir)\n if not os.path.exists(key_path):\n os.makedirs(key_path)\n\n key = os.path.join(pki_dir, 'minions', id_)\n with salt.utils.files.fopen(key, 'w+') as fp_:\n fp_.write(salt.utils.stringutils.to_str(pub))\n\n oldkey = os.path.join(pki_dir, 'minions_pre', id_)\n if os.path.isfile(oldkey):\n with salt.utils.files.fopen(oldkey) as fp_:\n if fp_.read() == pub:\n os.remove(oldkey)\n",
"def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_config_errors=False):\n '''\n Reads in the master configuration file and sets up default options\n\n This is useful for running the actual master daemon. For running\n Master-side client interfaces that need the master opts see\n :py:func:`salt.client.client_config`.\n '''\n if defaults is None:\n defaults = DEFAULT_MASTER_OPTS.copy()\n\n if not os.environ.get(env_var, None):\n # No valid setting was given using the configuration variable.\n # Lets see is SALT_CONFIG_DIR is of any use\n salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)\n if salt_config_dir:\n env_config_file_path = os.path.join(salt_config_dir, 'master')\n if salt_config_dir and os.path.isfile(env_config_file_path):\n # We can get a configuration file using SALT_CONFIG_DIR, let's\n # update the environment with this information\n os.environ[env_var] = env_config_file_path\n\n overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file'])\n default_include = overrides.get('default_include',\n defaults['default_include'])\n include = overrides.get('include', [])\n\n overrides.update(include_config(default_include, path, verbose=False,\n exit_on_config_errors=exit_on_config_errors))\n overrides.update(include_config(include, path, verbose=True,\n exit_on_config_errors=exit_on_config_errors))\n opts = apply_master_config(overrides, defaults)\n _validate_ssh_minion_opts(opts)\n _validate_opts(opts)\n # If 'nodegroups:' is uncommented in the master config file, and there are\n # no nodegroups defined, opts['nodegroups'] will be None. Fix this by\n # reverting this value to the default, as if 'nodegroups:' was commented\n # out or not present.\n if opts.get('nodegroups') is None:\n opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})\n if salt.utils.data.is_dictlist(opts['nodegroups']):\n opts['nodegroups'] = salt.utils.data.repack_dictlist(opts['nodegroups'])\n apply_sdb(opts)\n return opts\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
    def __init__(self, opts):
        '''
        Wire up the cloud machinery from the already-parsed configuration.
        '''
        # Master/cloud configuration dictionary driving every lookup below.
        self.opts = opts
        self.client = CloudClient(opts=self.opts)
        # Mapping of '<driver>.<function>' -> callable loaded cloud driver funcs.
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver is missing or misconfigured up front.
        self.__filter_non_working_providers()
        # Cache of provider query results, keyed by query name (e.g. 'list_nodes').
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        Same as map_providers but query in parallel.

        query
            Name of the driver query function to call (e.g. ``list_nodes``).
        cached
            When True, reuse a previously cached result for ``query``.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]

        opts = self.opts.copy()
        multiprocessing_data = []

        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'

                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue

                # Work unit handed to one pool worker.
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output

        # Pool size is capped at 10 workers regardless of provider count.
        data_count = len(multiprocessing_data)
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        # Fold worker results back into the {alias: {driver: details}} shape.
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details

        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return ``{alias: {driver: {vm_name: details}}}`` for the requested VM
        ``names`` that the configured providers currently report.

        names
            A single name or a list of names to look for.
        query
            Driver query function used to list nodes.
        cached
            Reuse cached provider query results when True.
        profile
            When set, only accept matches from the provider alias configured
            in that profile.
        '''
        if isinstance(names, six.string_types):
            names = [names]

        matches = {}
        # First alias seen per driver; used by the aws/ec2 dedup below.
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue

                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue

                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details

        return matches
def _optimize_providers(self, providers):
'''
Return an optimized mapping of available providers
'''
new_providers = {}
provider_by_driver = {}
for alias, driver in six.iteritems(providers):
for name, data in six.iteritems(driver):
if name not in provider_by_driver:
provider_by_driver[name] = {}
provider_by_driver[name][alias] = data
for driver, providers_data in six.iteritems(provider_by_driver):
fun = '{0}.optimize_providers'.format(driver)
if fun not in self.clouds:
log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
for name, prov_data in six.iteritems(providers_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
continue
new_data = self.clouds[fun](providers_data)
if new_data:
for name, prov_data in six.iteritems(new_data):
if name not in new_providers:
new_providers[name] = {}
new_providers[name][driver] = prov_data
return new_providers
def location_list(self, lookup='all'):
'''
Return a mapping of all location data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_locations'.format(driver)
if fun not in self.clouds:
# The capability to gather locations is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the locations information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def size_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_sizes'.format(driver)
if fun not in self.clouds:
# The capability to gather sizes is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the sizes information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        names
            Iterable of VM names to destroy.
        cached
            Reuse cached provider query results when looking the VMs up.

        Returns ``{alias: {driver: {name: result}}}``; names that could not
        be found are reported under a ``'Not Found'`` key.  After destroying,
        the matching minion keys are removed (asking interactively when the
        key name is ambiguous).
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })

        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)

            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)

            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)

            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)

        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    # Driver-level destroy call for this single VM.
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)

        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                # The driver reported nothing; keep the key.
                continue

            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            # Globbed matches catch keys written with append_domain suffixes.
            globbed_key_file = glob.glob('{0}.*'.format(key_file))

            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue

            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue

            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue

            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )

                try:
                    # A non-int or out-of-range selection lands here and
                    # simply re-prompts.
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue

                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break

                print('Did not delete \'{0}\''.format(filename))
                break

        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )

        elif names and processed:
            processed['Not Found'] = names

        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')

        return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        profile
            Name of an entry in ``self.opts['profiles']``.
        names
            VM names to create from that profile.
        vm_overrides
            Optional per-VM configuration overrides.

        Returns ``{name: create-result-or-Error-dict}``.
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}

        ret = {}
        if not vm_overrides:
            vm_overrides = {}

        # Best-effort read of the master cloud config file; fall back to {}.
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}

        if main_cloud_config is None:
            main_cloud_config = {}

        # Flatten currently-running nodes so name collisions can be detected.
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        vms = {}
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')

        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']

        for name in names:
            if name in vms:
                # Refuse to create over an existing node.
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue

            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                # Fire-and-forget; the child process does the actual create.
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue

            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                # Single-name runs propagate; multi-name runs record and go on.
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}

        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        names
            VM names (or driver-reported ids) the action applies to.
        kwargs
            Extra keyword arguments forwarded to the driver action.

        The action name itself is read from ``self.opts['action']``.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)

        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                # Everything already actioned; stop scanning providers.
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # The VM may have been requested by its driver id.
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue

                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue

                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}

                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)

                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)

        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))

            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)

            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)

        if not names:
            return ret

        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret

        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one results matched \'{0}\'. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
'not define the function \'{2}\''.format(alias, driver, func)
)
log.debug(
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
}
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing
        '''
        # Iterate over copies: entries are popped from the live dicts below.
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)

                    if alias not in self.opts['providers']:
                        continue

                    # Drop the alias entirely once its last driver is gone.
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue

                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    # Drivers signal bad configuration by returning False.
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)

            if alias not in self.opts['providers']:
                continue

            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.vm_config
|
python
|
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
|
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1319-L1334
|
[
"def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
    def __init__(self, opts):
        '''
        Build a Cloud object: load the cloud driver modules and a client.

        :param dict opts: the salt-cloud configuration options
        '''
        self.opts = opts
        self.client = CloudClient(opts=self.opts)
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver failed to load or reports itself
        # as not properly configured
        self.__filter_non_working_providers()
        # Cache of provider query results, keyed by query name
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return a dictionary describing the configured profiles

        :param provider: ``None``/``'all'`` for every provider, or a
            provider name used to restrict the result to profiles bound
            to that provider
        :param lookup: only ``None``/``'all'`` is handled here; any other
            value falls through and the method implicitly returns ``None``
        :return: a set of ``(profile_alias, provider_name)`` tuples
        :raises SaltCloudSystemExit: when no profiles are configured
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # NOTE(review): assumes the profile's 'provider' value
                    # has the 'alias:driver' form -- a bare name would raise
                    # IndexError on the [1] access below. TODO confirm the
                    # config loader normalizes this before we get here.
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        :param str query: name suffix of the driver function to call for
            each provider (``'<driver>.<query>'``)
        :param bool cached: when True, reuse the result of a previous call
            made with the same ``query``
        :return: ``{alias: {driver: <query result>}}``
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Inject the provider name so the driver knows which
                    # configured provider it is acting for
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts['providers'] = self._optimize_providers(opts['providers'])
for alias, drivers in six.iteritems(opts['providers']):
# Make temp query for this driver to avoid overwrite next
this_query = query
for driver, details in six.iteritems(drivers):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
multiprocessing_data.append({
'fun': fun,
'opts': opts,
'query': this_query,
'alias': alias,
'driver': driver
})
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
init_pool_worker)
parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
multiprocessing_data,
pool=pool)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of the provider map whose VM names appear in
        ``names``, as ``{alias: {driver: {vm_name: details}}}``.

        :param names: a VM name or a list of VM names to look for
        :param str query: driver query to run (see ``map_providers_parallel``)
        :param bool cached: reuse a cached provider query when available
        :param profile: when given, only match instances whose provider
            alias matches the profile's configured provider
        '''
        if isinstance(names, six.string_types):
            names = [names]

        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue

                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    # NOTE(review): the 'aws'/'ec2' dedup branches index
                    # matches[handled_drivers[...]] without a membership
                    # check -- presumably safe because the corresponding
                    # alias was matched first; verify before relying on it.
                    if vm_name not in names:
                        continue

                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue

                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}

                    matches[alias][driver][vm_name] = details

        return matches
    def _optimize_providers(self, providers):
        '''
        Return an optimized mapping of available providers

        Groups provider configs by driver and, when a driver exposes an
        ``optimize_providers`` function, lets it consolidate them; drivers
        without that function are passed through unchanged.

        :param dict providers: ``{alias: {driver: config}}``
        :return: ``{alias: {driver: config}}`` possibly consolidated
        '''
        new_providers = {}
        provider_by_driver = {}

        # Invert the mapping: driver -> {alias: config}
        for alias, driver in six.iteritems(providers):
            for name, data in six.iteritems(driver):
                if name not in provider_by_driver:
                    provider_by_driver[name] = {}

                provider_by_driver[name][alias] = data

        for driver, providers_data in six.iteritems(provider_by_driver):
            fun = '{0}.optimize_providers'.format(driver)
            if fun not in self.clouds:
                log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)

                for name, prov_data in six.iteritems(providers_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
                continue

            new_data = self.clouds[fun](providers_data)
            if new_data:
                for name, prov_data in six.iteritems(new_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data

        return new_providers
    def location_list(self, lookup='all'):
        '''
        Return a mapping of all location data for available providers

        :param lookup: provider selector passed to ``lookup_providers``
        :return: ``{alias: {driver: avail_locations() result}}``; drivers
            without an ``avail_locations`` function are skipped
        '''
        data = {}

        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data

        for alias, driver in lookups:
            fun = '{0}.avail_locations'.format(driver)
            if fun not in self.clouds:
                # The capability to gather locations is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the locations information',
                    driver, alias
                )
                continue

            if alias not in data:
                data[alias] = {}

            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
    def image_list(self, lookup='all'):
        '''
        Return a mapping of all image data for available providers

        :param lookup: provider selector passed to ``lookup_providers``
        :return: ``{alias: {driver: avail_images() result}}``; drivers
            without an ``avail_images`` function are skipped
        '''
        data = {}

        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data

        for alias, driver in lookups:
            fun = '{0}.avail_images'.format(driver)
            if fun not in self.clouds:
                # The capability to gather images is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the images information',
                    driver, alias
                )
                continue

            if alias not in data:
                data[alias] = {}

            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
    def size_list(self, lookup='all'):
        '''
        Return a mapping of all size data for available providers

        (docstring previously said "image data" -- copy-paste from
        ``image_list``; this method queries ``avail_sizes``)

        :param lookup: provider selector passed to ``lookup_providers``
        :return: ``{alias: {driver: avail_sizes() result}}``; drivers
            without an ``avail_sizes`` function are skipped
        '''
        data = {}

        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data

        for alias, driver in lookups:
            fun = '{0}.avail_sizes'.format(driver)
            if fun not in self.clouds:
                # The capability to gather sizes is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the sizes information',
                    driver, alias
                )
                continue

            if alias not in data:
                data[alias] = {}

            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
    def provider_list(self, lookup='all'):
        '''
        Return a mapping of the matched provider aliases to their drivers

        (docstring previously said "image data" -- copy-paste; this method
        returns only the alias/driver skeleton, with empty dicts as values)

        :param lookup: provider selector passed to ``lookup_providers``
        :return: ``{alias: {driver: {}}}``
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data

        for alias, driver in lookups:
            if alias not in data:
                data[alias] = {}
            if driver not in data[alias]:
                data[alias][driver] = {}
        return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        Runs each driver's ``destroy`` function (in parallel when
        ``opts['parallel']`` is set), then removes the matching minion
        key(s) from the local master's pki directory, prompting the user
        when several globbed key files match a VM name.

        :param names: iterable of VM names to destroy
        :param bool cached: reuse a cached provider query when locating VMs
        :return: ``{alias: {driver: {name: driver result}}}``, plus a
            ``'Not Found'`` entry for names that matched nothing
        :raises SaltCloudSystemExit: when no named VM could be found or
            nothing was destroyed
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })

        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)

            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)

            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)

            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)

        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)

        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                # Driver reported failure; keep the key in place
                continue

            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            globbed_key_file = glob.glob('{0}.*'.format(key_file))

            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue

            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue

            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue

            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # whose name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )

                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue

                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break

                print('Did not delete \'{0}\''.format(filename))
                break

        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )

        elif names and processed:
            processed['Not Found'] = names

        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')

        return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
    def create(self, vm_, local_master=True):
        '''
        Create a single VM

        Generates minion (and optionally master) keys, accepts the minion
        key on the local master, invokes the driver's ``create`` function,
        and optionally runs ``saltutil.sync_*`` and a ``start_action``
        afterwards.

        :param dict vm_: the VM's merged configuration; must contain
            ``provider`` (``'alias:driver'``) and ``name``
        :param bool local_master: accept the generated minion key on this
            machine's pki directory
        :return: the driver's output dict (possibly extended with a
            ``'ret'`` key from ``start_action``), or ``None`` when the
            driver has no ``create`` function
        '''
        output = {}

        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )

        alias, driver = vm_['provider'].split(':')
        fun = '{0}.create'.format(driver)
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                vm_['name'], vm_['provider'], driver
            )
            return

        deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
        make_master = salt.config.get_cloud_config_value(
            'make_master',
            vm_,
            self.opts
        )

        if deploy:
            if not make_master and 'master' not in minion_dict:
                log.warning(
                    'There\'s no master defined on the \'%s\' VM settings.',
                    vm_['name']
                )

            if 'pub_key' not in vm_ and 'priv_key' not in vm_:
                log.debug('Generating minion keys for \'%s\'', vm_['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        vm_,
                        self.opts
                    )
                )
                vm_['pub_key'] = pub
                vm_['priv_key'] = priv
        else:
            # Note(pabelanger): We still reference pub_key and priv_key when
            # deploy is disabled.
            vm_['pub_key'] = None
            vm_['priv_key'] = None

        # The key id mirrors the minion id, with append_domain applied
        key_id = minion_dict.get('id', vm_['name'])

        domain = vm_.get('domain')
        if vm_.get('use_fqdn') and domain:
            minion_dict['append_domain'] = domain

        if 'append_domain' in minion_dict:
            key_id = '.'.join([key_id, minion_dict['append_domain']])

        if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
            log.debug('Generating the master keys for \'%s\'', vm_['name'])
            master_priv, master_pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['master_pub'] = master_pub
            vm_['master_pem'] = master_priv

        if local_master is True and deploy is True:
            # Accept the key on the local master
            salt.utils.cloud.accept_key(
                self.opts['pki_dir'], vm_['pub_key'], key_id
            )

        vm_['os'] = salt.config.get_cloud_config_value(
            'script',
            vm_,
            self.opts
        )

        try:
            vm_['inline_script'] = salt.config.get_cloud_config_value(
                'inline_script',
                vm_,
                self.opts
            )
        except KeyError:
            pass

        try:
            alias, driver = vm_['provider'].split(':')
            func = '{0}.create'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                output = self.clouds[func](vm_)
            if output is not False and 'sync_after_install' in self.opts:
                if self.opts['sync_after_install'] not in (
                        'all', 'modules', 'states', 'grains'):
                    log.error('Bad option for sync_after_install')
                    return output

                # A small pause helps the sync work more reliably
                time.sleep(3)

                start = int(time.time())
                while int(time.time()) < start + 60:
                    # We'll try every <timeout> seconds, up to a minute
                    mopts_ = salt.config.DEFAULT_MASTER_OPTS
                    conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                    mopts_.update(
                        salt.config.master_config(
                            os.path.join(conf_path,
                                         'master')
                        )
                    )

                    client = salt.client.get_local_client(mopts=mopts_)
                    ret = client.cmd(
                        vm_['name'],
                        'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                        timeout=self.opts['timeout']
                    )
                    if ret:
                        log.info(
                            six.u('Synchronized the following dynamic modules: '
                                  '  {0}').format(ret)
                        )
                        break
        except KeyError as exc:
            log.exception(
                'Failed to create VM %s. Configuration value %s needs '
                'to be set', vm_['name'], exc
            )
        # If it's a map then we need to respect the 'requires'
        # so we do it later
        try:
            opt_map = self.opts['map']
        except KeyError:
            opt_map = False
        if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
            log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
            client = salt.client.get_local_client(mopts=self.opts)
            action_out = client.cmd(
                vm_['name'],
                self.opts['start_action'],
                timeout=self.opts['timeout'] * 60
            )
            output['ret'] = action_out
        return output
@staticmethod
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        Creates one VM per name from the given profile, skipping names that
        already exist on a provider.

        :param str profile: name of a configured profile
        :param names: iterable of VM names to create
        :param dict vm_overrides: per-VM config overrides merged last
        :return: ``{name: create() result or {'Error': ...}}``
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}

        ret = {}
        if not vm_overrides:
            vm_overrides = {}

        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}

        if main_cloud_config is None:
            main_cloud_config = {}

        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        # Flatten the provider map into {vm_name: details} for fast
        # existence checks below
        vms = {}
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')

        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']

        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue

            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue

            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}

        return ret
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        The action name comes from ``self.opts['action']`` and is resolved to
        ``'<driver>.<action>'`` per provider.

        :param names: iterable of VM names to act on
        :param dict kwargs: extra arguments forwarded to the driver function
        :return: ``{alias: {driver: {name: result}}}`` plus bookkeeping keys
            (``'Invalid Actions'``, ``'Not Found'``,
            ``'Not Actioned/Not Running'``) for names that could not be
            actioned
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)

        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # Some drivers key the map on the instance id instead
                        # of the name; accept a match on the 'id' field too
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue

                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue

                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}

                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)

                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)

        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))

            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)

            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)

        if not names:
            return ret

        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret

        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        :param prov: provider selector resolved via ``lookup_providers``;
            must match exactly one provider
        :param str func: driver function suffix to execute
        :param dict kwargs: keyword arguments forwarded to the function
        :return: ``{alias: {driver: function result}}``
        :raises SaltCloudSystemExit: when ``prov`` matches more than one
            provider or the driver lacks ``func``
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )

        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )

        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )

        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        A provider is removed when its driver's
        ``get_configured_provider`` function is missing from the loaded
        cloud modules, or when calling it returns ``False``. Aliases left
        with no drivers are removed entirely. Mutates
        ``self.opts['providers']`` in place.
        '''
        # Iterate over copies, since entries are popped while looping
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)

                    if alias not in self.opts['providers']:
                        continue

                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue

                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)

            if alias not in self.opts['providers']:
                continue

            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.extras
|
python
|
def extras(self, extra_):
'''
Extra actions
'''
output = {}
alias, driver = extra_['provider'].split(':')
fun = '{0}.{1}'.format(driver, extra_['action'])
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
extra_['name'], extra_['provider'], driver
)
return
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=extra_['provider']
):
output = self.clouds[fun](**extra_)
except KeyError as exc:
log.exception(
'Failed to perform %s.%s on %s. '
'Configuration value %s needs to be set',
extra_['provider'], extra_['action'], extra_['name'], exc
)
return output
|
Extra actions
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1336-L1364
| null |
class Cloud(object):
'''
An object for the creation of new VMs
'''
    def __init__(self, opts):
        '''
        Build a Cloud object: load the cloud driver modules and a client.

        :param dict opts: the salt-cloud configuration options
        '''
        self.opts = opts
        self.client = CloudClient(opts=self.opts)
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver failed to load or reports itself
        # as not properly configured
        self.__filter_non_working_providers()
        # Cache of provider query results, keyed by query name
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
    def lookup_providers(self, lookup):
        '''
        Get a dict describing the configured providers

        :param lookup: ``None``/``'all'`` for every provider,
            ``'alias:driver'`` for an exact lookup, or a bare name matched
            against aliases and driver names
        :return: a set of ``(alias, driver)`` tuples
        :raises SaltCloudSystemExit: when nothing matches ``lookup``
        '''
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            providers = set()
            for alias, drivers in six.iteritems(self.opts['providers']):
                for driver in drivers:
                    providers.add((alias, driver))

            if not providers:
                raise SaltCloudSystemExit(
                    'There are no cloud providers configured.'
                )

            return providers

        if ':' in lookup:
            # Validate the exact 'alias:driver' form.
            # NOTE(review): after validating, control falls through to the
            # bare-name loop below, which compares the whole colon-containing
            # lookup against each alias/driver name and so can never match --
            # a valid 'alias:driver' lookup ends up raising. Looks like a
            # missing 'return set([(alias, driver)])' here; confirm intent.
            alias, driver = lookup.split(':')
            if alias not in self.opts['providers'] or \
                    driver not in self.opts['providers'][alias]:
                raise SaltCloudSystemExit(
                    'No cloud providers matched \'{0}\'. Available: {1}'.format(
                        lookup, ', '.join(self.get_configured_providers())
                    )
                )

        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                if lookup in (alias, driver):
                    providers.add((alias, driver))

        if not providers:
            raise SaltCloudSystemExit(
                'No cloud providers matched \'{0}\'. '
                'Available selections: {1}'.format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
        return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return a dictionary describing the configured profiles

        :param provider: ``None``/``'all'`` for every provider, or a
            provider name to restrict the result
        :param lookup: only ``None``/``'all'`` is handled; any other value
            implicitly returns ``None``
        :return: a set of ``(profile_alias, provider_name)`` tuples
        :raises SaltCloudSystemExit: when no profiles are configured
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'

        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')

                if providers:
                    # NOTE(review): assumes 'provider' is 'alias:driver';
                    # a bare name would raise IndexError on the [1] access.
                    # TODO confirm config normalization upstream.
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))

            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )

            if provider != 'all':
                return provider_profiles

            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        :param str query: driver function suffix called per provider
        :param bool cached: reuse the cached result for this ``query``
        :return: ``{alias: {driver: <query result>}}``
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]

        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}

                try:
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs.

    Same as map_providers but query in parallel (one pool worker per
    provider/driver pair, capped at 10).
    '''
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]

    opts = self.opts.copy()
    multiprocessing_data = []

    # Optimize Providers
    opts['providers'] = self._optimize_providers(opts['providers'])
    for alias, drivers in six.iteritems(opts['providers']):
        # Make temp query for this driver to avoid overwrite next
        this_query = query
        for driver, details in six.iteritems(drivers):
            # If driver has function list_nodes_min, just replace it
            # with query param to check existing vms on this driver
            # for minimum information, Otherwise still use query param.
            # NOTE(review): once switched to 'list_nodes_min', this_query is
            # never reset to the original query for later drivers under the
            # same alias -- confirm whether that is intentional.
            if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                this_query = 'list_nodes_min'

            fun = '{0}.{1}'.format(driver, this_query)
            if fun not in self.clouds:
                log.error('Public cloud provider %s is not available', driver)
                continue

            multiprocessing_data.append({
                'fun': fun,
                'opts': opts,
                'query': this_query,
                'alias': alias,
                'driver': driver
            })
    output = {}
    if not multiprocessing_data:
        return output

    # Pool size: one worker per query, at most 10.
    data_count = len(multiprocessing_data)
    pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                init_pool_worker)
    parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                   multiprocessing_data,
                                   pool=pool)
    for alias, driver, details in parallel_pmap:
        if not details:
            # There's no providers details?! Skip it!
            continue
        if alias not in output:
            output[alias] = {}
        output[alias][driver] = details

    self.__cached_provider_queries[query] = output
    return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
                         profile=None):
    '''
    Return the running nodes whose names appear in ``names``, shaped as
    ``{alias: {driver: {vm_name: details}}}``.

    query
        Provider query function to run (default ``list_nodes``).
    cached
        Reuse a cached provider query when available.
    profile
        When given, only consider the provider alias named in that
        profile's ``provider`` setting.
    '''
    if isinstance(names, six.string_types):
        names = [names]

    matches = {}
    # First alias seen for each driver; used by the aws/ec2 dedup below.
    handled_drivers = {}
    mapped_providers = self.map_providers_parallel(query, cached=cached)
    for alias, drivers in six.iteritems(mapped_providers):
        for driver, vms in six.iteritems(drivers):
            if driver not in handled_drivers:
                handled_drivers[driver] = alias
            # When a profile is specified, only return an instance
            # that matches the provider specified in the profile.
            # This solves the issues when many providers return the
            # same instance. For example there may be one provider for
            # each availability zone in amazon in the same region, but
            # the search returns the same instance for each provider
            # because amazon returns all instances in a region, not
            # availability zone.
            if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                continue

            for vm_name, details in six.iteritems(vms):
                # XXX: The logic below can be removed once the aws driver
                # is removed
                # NOTE(review): matches[handled_drivers['aws']] assumes that
                # alias already produced a match; verify it cannot KeyError.
                if vm_name not in names:
                    continue
                elif driver == 'ec2' and 'aws' in handled_drivers and \
                        'aws' in matches[handled_drivers['aws']] and \
                        vm_name in matches[handled_drivers['aws']]['aws']:
                    continue
                elif driver == 'aws' and 'ec2' in handled_drivers and \
                        'ec2' in matches[handled_drivers['ec2']] and \
                        vm_name in matches[handled_drivers['ec2']]['ec2']:
                    continue

                if alias not in matches:
                    matches[alias] = {}
                if driver not in matches[alias]:
                    matches[alias][driver] = {}
                matches[alias][driver][vm_name] = details

    return matches
def _optimize_providers(self, providers):
    '''
    Return an optimized mapping of available providers.

    The input ``{alias: {driver: data}}`` is inverted to group aliases by
    driver; drivers exposing an ``optimize_providers`` function get to
    rewrite their group, others are passed through unchanged.
    '''
    new_providers = {}
    provider_by_driver = {}

    # Invert {alias: {driver: data}} into {driver: {alias: data}}.
    for alias, driver_map in six.iteritems(providers):
        for drv_name, drv_data in six.iteritems(driver_map):
            provider_by_driver.setdefault(drv_name, {})[alias] = drv_data

    for drv_name, alias_map in six.iteritems(provider_by_driver):
        fun = '{0}.optimize_providers'.format(drv_name)
        if fun not in self.clouds:
            # No optimizer for this driver; copy its entries verbatim.
            log.debug('The \'%s\' cloud driver is unable to be optimized.', drv_name)
            for alias, drv_data in six.iteritems(alias_map):
                new_providers.setdefault(alias, {})[drv_name] = drv_data
            continue

        optimized = self.clouds[fun](alias_map)
        if optimized:
            for alias, drv_data in six.iteritems(optimized):
                new_providers.setdefault(alias, {})[drv_name] = drv_data

    return new_providers
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers,
    keyed by provider alias and then driver name.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data

    for alias, driver in lookups:
        fun = '{0}.avail_locations'.format(driver)
        if fun not in self.clouds:
            # This driver cannot enumerate locations; skip it.
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the locations information',
                driver, alias
            )
            continue

        data.setdefault(alias, {})
        try:
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers,
    keyed by provider alias and then driver name.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data

    for alias, driver in lookups:
        fun = '{0}.avail_images'.format(driver)
        if fun not in self.clouds:
            # This driver cannot enumerate images; skip it.
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the images information',
                driver, alias
            )
            continue

        data.setdefault(alias, {})
        try:
            with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def size_list(self, lookup='all'):
    '''
    Return a mapping of all size data for available providers,
    keyed by provider alias and then driver name.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_sizes'.format(driver)
        if fun not in self.clouds:
            # The capability to gather sizes is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the sizes information',
                driver, alias
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            # Best effort: a failing driver is logged and skipped.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def provider_list(self, lookup='all'):
    '''
    Return the configured providers as a nested mapping
    ``{alias: {driver: {}}}``.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        data.setdefault(alias, {}).setdefault(driver, {})
    return data
def profile_list(self, provider, lookup='all'):
    '''
    Return the configured profiles as a nested mapping
    ``{alias: {driver: {}}}``.
    '''
    data = {}
    lookups = self.lookup_profiles(provider, lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        data.setdefault(alias, {}).setdefault(driver, {})
    return data
def create_all(self):
    '''
    Create/Verify the VMs in the VM data.

    Returns a list of one-entry dicts, ``{vm_name: create-result}``,
    one per configured profile.
    '''
    return [
        {vm_name: self.create(vm_details)}
        for vm_name, vm_details in six.iteritems(self.opts['profiles'])
    ]
def destroy(self, names, cached=False):
    '''
    Destroy the named VMs.

    names
        Iterable of VM names; matched against what the configured
        providers report as running.
    cached
        When True, reuse a previously cached provider query.

    Returns ``{alias: {driver: {name: driver-result}}}``; raises
    SaltCloudSystemExit when nothing matched or nothing was destroyed.
    After destruction, accepted minion keys for the destroyed VMs are
    removed from the master's pki directory (interactively when several
    keys share the VM's name prefix).
    '''
    processed = {}
    names = set(names)
    matching = self.get_running_by_names(names, cached=cached)
    vms_to_destroy = set()
    parallel_data = []
    for alias, drivers in six.iteritems(matching):
        for driver, vms in six.iteritems(drivers):
            for name in vms:
                if name in names:
                    vms_to_destroy.add((alias, driver, name))
                    if self.opts['parallel']:
                        parallel_data.append({
                            'opts': self.opts,
                            'name': name,
                            'alias': alias,
                            'driver': driver,
                        })

    # destroying in parallel
    if self.opts['parallel'] and parallel_data:
        # set the pool size based on configuration or default to
        # the number of machines we're destroying
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Destroying in parallel mode; '
                 'Cloud pool size: %s', pool_size)

        # kick off the parallel destroy
        output_multip = enter_mainloop(
            _destroy_multiprocessing, parallel_data, pool_size=pool_size)

        # massage the multiprocessing output a bit
        ret_multip = {}
        for obj in output_multip:
            ret_multip.update(obj)

        # build up a data structure similar to what the non-parallel
        # destroy uses
        for obj in parallel_data:
            alias = obj['alias']
            driver = obj['driver']
            name = obj['name']
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret_multip[name]
            if name in names:
                names.remove(name)

    # not destroying in parallel
    else:
        log.info('Destroying in non-parallel mode.')
        for alias, driver, name in vms_to_destroy:
            fun = '{0}.destroy'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)

    # now the processed data structure contains the output from either
    # the parallel or non-parallel destroy and we should finish up
    # with removing minion keys if necessary
    for alias, driver, name in vms_to_destroy:
        ret = processed[alias][driver][name]
        if not ret:
            # Driver reported a failed/empty destroy; keep the key.
            continue

        vm_ = {
            'name': name,
            'profile': None,
            'provider': ':'.join([alias, driver]),
            'driver': driver
        }
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        key_file = os.path.join(
            self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
        )
        # Keys created with append_domain show up as '<name>.<domain>'.
        globbed_key_file = glob.glob('{0}.*'.format(key_file))

        if not os.path.isfile(key_file) and not globbed_key_file:
            # There's no such key file!? It might have been renamed
            if isinstance(ret, dict) and 'newname' in ret:
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], ret['newname']
                )
            continue

        if os.path.isfile(key_file) and not globbed_key_file:
            # Single key entry. Remove it!
            salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
            continue

        # Since we have globbed matches, there are probably some keys for which their minion
        # configuration has append_domain set.
        if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
            # Single entry, let's remove it!
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'],
                os.path.basename(globbed_key_file[0])
            )
            continue

        # Since we can't get the profile or map entry used to create
        # the VM, we can't also get the append_domain setting.
        # And if we reached this point, we have several minion keys
        # who's name starts with the machine name we're deleting.
        # We need to ask one by one!?
        print(
            'There are several minion keys who\'s name starts '
            'with \'{0}\'. We need to ask you which one should be '
            'deleted:'.format(
                name
            )
        )
        while True:
            for idx, filename in enumerate(globbed_key_file):
                print(' {0}: {1}'.format(
                    idx, os.path.basename(filename)
                ))
            selection = input(
                'Which minion key should be deleted(number)? '
            )
            try:
                selection = int(selection)
            except ValueError:
                print(
                    '\'{0}\' is not a valid selection.'.format(selection)
                )

            try:
                filename = os.path.basename(
                    globbed_key_file.pop(selection)
                )
            except Exception:
                # Out-of-range or non-numeric choice: ask again.
                continue

            delete = input(
                'Delete \'{0}\'? [Y/n]? '.format(filename)
            )
            if delete == '' or delete.lower().startswith('y'):
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], filename
                )
                print('Deleted \'{0}\''.format(filename))
                break

            print('Did not delete \'{0}\''.format(filename))
            break

    if names and not processed:
        # These machines were asked to be destroyed but could not be found
        raise SaltCloudSystemExit(
            'The following VM\'s were not found: {0}'.format(
                ', '.join(names)
            )
        )
    elif names and processed:
        processed['Not Found'] = names
    elif not processed:
        raise SaltCloudSystemExit('No machines were destroyed!')

    return processed
def reboot(self, names):
    '''
    Reboot the named VMs.

    Returns a list of one-entry dicts, ``{name: driver-reboot-result}``.
    '''
    ret = []
    pmap = self.map_providers_parallel()

    # Collect, per provider, the running nodes we were asked to reboot.
    acts = {}
    for prov, nodes in six.iteritems(pmap):
        acts[prov] = [node for node in nodes if node in names]

    for prov, names_ in six.iteritems(acts):
        fun = '{0}.reboot'.format(prov)
        ret.extend({name: self.clouds[fun](name)} for name in names_)
    return ret
def create(self, vm_, local_master=True):
    '''
    Create a single VM.

    vm_
        Fully merged VM configuration dict; must contain 'name' and a
        'provider' value of the form '<alias>:<driver>'.
    local_master
        When True and deployment is enabled, pre-accept the minion key
        on the local master.

    Returns the driver's creation output (possibly extended with a
    'ret' key from start_action), or None when the driver's create
    function is unavailable.
    '''
    output = {}

    minion_dict = salt.config.get_cloud_config_value(
        'minion', vm_, self.opts, default={}
    )

    alias, driver = vm_['provider'].split(':')
    fun = '{0}.create'.format(driver)
    if fun not in self.clouds:
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            vm_['name'], vm_['provider'], driver
        )
        return

    deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
    make_master = salt.config.get_cloud_config_value(
        'make_master',
        vm_,
        self.opts
    )

    if deploy:
        if not make_master and 'master' not in minion_dict:
            log.warning(
                'There\'s no master defined on the \'%s\' VM settings.',
                vm_['name']
            )

        # Generate a fresh minion keypair unless one was supplied.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'%s\'', vm_['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['pub_key'] = pub
            vm_['priv_key'] = priv
    else:
        # Note(pabelanger): We still reference pub_key and priv_key when
        # deploy is disabled.
        vm_['pub_key'] = None
        vm_['priv_key'] = None

    key_id = minion_dict.get('id', vm_['name'])

    domain = vm_.get('domain')
    if vm_.get('use_fqdn') and domain:
        minion_dict['append_domain'] = domain

    if 'append_domain' in minion_dict:
        key_id = '.'.join([key_id, minion_dict['append_domain']])

    if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
        log.debug('Generating the master keys for \'%s\'', vm_['name'])
        master_priv, master_pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize',
                vm_,
                self.opts
            )
        )
        vm_['master_pub'] = master_pub
        vm_['master_pem'] = master_priv

    if local_master is True and deploy is True:
        # Accept the key on the local master
        salt.utils.cloud.accept_key(
            self.opts['pki_dir'], vm_['pub_key'], key_id
        )

    vm_['os'] = salt.config.get_cloud_config_value(
        'script',
        vm_,
        self.opts
    )

    try:
        vm_['inline_script'] = salt.config.get_cloud_config_value(
            'inline_script',
            vm_,
            self.opts
        )
    except KeyError:
        pass

    try:
        alias, driver = vm_['provider'].split(':')
        # NOTE(review): 'func' duplicates 'fun' (both '<driver>.create');
        # the inject below uses 'fun' while the call uses 'func'. Harmless
        # as written, but confirm before any cleanup.
        func = '{0}.create'.format(driver)
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            output = self.clouds[func](vm_)
        if output is not False and 'sync_after_install' in self.opts:
            if self.opts['sync_after_install'] not in (
                    'all', 'modules', 'states', 'grains'):
                log.error('Bad option for sync_after_install')
                return output

            # A small pause helps the sync work more reliably
            time.sleep(3)

            start = int(time.time())
            while int(time.time()) < start + 60:
                # We'll try every <timeout> seconds, up to a minute
                # NOTE(review): mopts_ aliases the shared
                # salt.config.DEFAULT_MASTER_OPTS dict, so the update()
                # below mutates the module-level defaults -- confirm
                # whether a .copy() was intended here.
                mopts_ = salt.config.DEFAULT_MASTER_OPTS
                conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                mopts_.update(
                    salt.config.master_config(
                        os.path.join(conf_path,
                                     'master')
                    )
                )

                client = salt.client.get_local_client(mopts=mopts_)

                ret = client.cmd(
                    vm_['name'],
                    'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                    timeout=self.opts['timeout']
                )
                if ret:
                    log.info(
                        six.u('Synchronized the following dynamic modules: '
                              ' {0}').format(ret)
                    )
                    break
    except KeyError as exc:
        log.exception(
            'Failed to create VM %s. Configuration value %s needs '
            'to be set', vm_['name'], exc
        )
    # If it's a map then we need to respect the 'requires'
    # so we do it later
    try:
        opt_map = self.opts['map']
    except KeyError:
        opt_map = False
    if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
        log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
        client = salt.client.get_local_client(mopts=self.opts)
        action_out = client.cmd(
            vm_['name'],
            self.opts['start_action'],
            timeout=self.opts['timeout'] * 60
        )
        output['ret'] = action_out
    return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
    '''
    Assemble the final configuration dict for a single VM.

    Precedence, lowest to highest: main cloud config, provider config,
    profile config, per-VM overrides; the VM's name is set last.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    '''
    merged = salt.utils.dictupdate.update(main.copy(), provider)
    merged = salt.utils.dictupdate.update(merged, profile)
    merged.update(overrides)
    merged['name'] = name
    return merged
def run_profile(self, profile, names, vm_overrides=None):
    '''
    Parse over the options passed on the command line and determine how to
    handle them.

    profile
        Name of a profile defined in ``self.opts['profiles']``.
    names
        VM names to create; a name already present on any provider is
        reported as an error instead of being re-created.
    vm_overrides
        Optional dict of per-VM config values applied on top of the
        merged main/provider/profile configuration.

    Returns ``{name: creation-result-or-error-dict}``.
    '''
    if profile not in self.opts['profiles']:
        msg = 'Profile {0} is not defined'.format(profile)
        log.error(msg)
        return {'Error': msg}

    ret = {}
    if not vm_overrides:
        vm_overrides = {}

    # Load the raw main cloud config file; any failure degrades to {}.
    try:
        with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
            main_cloud_config = salt.utils.yaml.safe_load(mcc)
        if not main_cloud_config:
            main_cloud_config = {}
    except KeyError:
        main_cloud_config = {}
    except IOError:
        main_cloud_config = {}

    if main_cloud_config is None:
        main_cloud_config = {}

    mapped_providers = self.map_providers_parallel()
    profile_details = self.opts['profiles'][profile]
    # Flatten the provider map into {vm_name: details} for existence checks.
    vms = {}
    for prov, val in six.iteritems(mapped_providers):
        prov_name = next(iter(val))
        for node in mapped_providers[prov][prov_name]:
            vms[node] = mapped_providers[prov][prov_name][node]
            vms[node]['provider'] = prov
            vms[node]['driver'] = prov_name
    alias, driver = profile_details['provider'].split(':')
    provider_details = self.opts['providers'][alias][driver].copy()
    del provider_details['profiles']

    for name in names:
        if name in vms:
            prov = vms[name]['provider']
            driv = vms[name]['driver']
            msg = '{0} already exists under {1}:{2}'.format(
                name, prov, driv
            )
            log.error(msg)
            ret[name] = {'Error': msg}
            continue

        vm_ = self.vm_config(
            name,
            main_cloud_config,
            provider_details,
            profile_details,
            vm_overrides,
        )
        if self.opts['parallel']:
            process = multiprocessing.Process(
                target=self.create,
                args=(vm_,)
            )
            process.start()
            ret[name] = {
                'Provisioning': 'VM being provisioned in parallel. '
                                'PID: {0}'.format(process.pid)
            }
            continue

        try:
            # No need to inject __active_provider_name__ into the context
            # here because self.create takes care of that
            ret[name] = self.create(vm_)
            if not ret[name]:
                ret[name] = {'Error': 'Failed to deploy VM'}
                if len(names) == 1:
                    raise SaltCloudSystemExit('Failed to deploy VM')
                continue
            if self.opts.get('show_deploy_args', False) is False:
                ret[name].pop('deploy_kwargs', None)
        except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
            # With a single requested name, propagate the failure;
            # otherwise record it and keep creating the rest.
            if len(names) == 1:
                raise
            ret[name] = {'Error': str(exc)}

    return ret
def do_action(self, names, kwargs):
    '''
    Perform an action on a VM which may be specific to this cloud provider.

    The action name is read from ``self.opts['action']``; each named VM is
    located across the configured providers and the driver's action
    function is invoked for it.

    names
        VM names (or driver-reported ids) to act on.
    kwargs
        Extra keyword arguments forwarded to the driver action.

    Returns ``{alias: {driver: {vm_name: result}}}``, plus bookkeeping
    keys 'Invalid Actions', 'Not Found' and 'Not Actioned/Not Running'
    where applicable.
    '''
    ret = {}
    invalid_functions = {}
    names = set(names)

    for alias, drivers in six.iteritems(self.map_providers_parallel()):
        if not names:
            # Everything has been actioned; stop scanning providers.
            break
        for driver, vms in six.iteritems(drivers):
            if not names:
                break
            valid_function = True
            fun = '{0}.{1}'.format(driver, self.opts['action'])
            if fun not in self.clouds:
                log.info('\'%s()\' is not available. Not actioning...', fun)
                valid_function = False
            for vm_name, vm_details in six.iteritems(vms):
                if not names:
                    break
                if vm_name not in names:
                    # Also match on the driver-reported instance id.
                    if not isinstance(vm_details, dict):
                        vm_details = {}
                    if 'id' in vm_details and vm_details['id'] in names:
                        vm_name = vm_details['id']
                    else:
                        log.debug(
                            'vm:%s in provider:%s is not in name '
                            'list:\'%s\'', vm_name, driver, names
                        )
                        continue

                # Build the dictionary of invalid functions with their associated VMs.
                if valid_function is False:
                    if invalid_functions.get(fun) is None:
                        invalid_functions.update({fun: []})
                    invalid_functions[fun].append(vm_name)
                    continue

                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if alias not in ret:
                        ret[alias] = {}
                    if driver not in ret[alias]:
                        ret[alias][driver] = {}

                    # Clean kwargs of "__pub_*" data before running the cloud action call.
                    # Prevents calling positional "kwarg" arg before "call" when no kwarg
                    # argument is present in the cloud driver function's arg spec.
                    kwargs = salt.utils.args.clean_kwargs(**kwargs)

                    if kwargs:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, kwargs, call='action'
                        )
                    else:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, call='action'
                        )
                    names.remove(vm_name)

    # Set the return information for the VMs listed in the invalid_functions dict.
    missing_vms = set()
    if invalid_functions:
        ret['Invalid Actions'] = invalid_functions
        invalid_func_vms = set()
        for key, val in six.iteritems(invalid_functions):
            invalid_func_vms = invalid_func_vms.union(set(val))

        # Find the VMs that are in names, but not in set of invalid functions.
        missing_vms = names.difference(invalid_func_vms)
        if missing_vms:
            ret['Not Found'] = list(missing_vms)
            ret['Not Actioned/Not Running'] = list(names)

    if not names:
        return ret

    # Don't return missing VM information for invalid functions until after we've had a
    # Chance to return successful actions. If a function is valid for one driver, but
    # Not another, we want to make sure the successful action is returned properly.
    if missing_vms:
        return ret

    # If we reach this point, the Not Actioned and Not Found lists will be the same,
    # But we want to list both for clarity/consistency with the invalid functions lists.
    ret['Not Actioned/Not Running'] = list(names)
    ret['Not Found'] = list(names)
    return ret
def do_function(self, prov, func, kwargs):
    '''
    Perform a function against a cloud provider.

    Resolves ``prov`` to exactly one (alias, driver) pair, then calls
    the driver's ``func`` with ``call='function'``.  Returns the result
    wrapped as ``{alias: {driver: result}}``.
    '''
    matches = self.lookup_providers(prov)
    if len(matches) > 1:
        raise SaltCloudSystemExit(
            'More than one results matched \'{0}\'. Please specify '
            'one of: {1}'.format(
                prov,
                ', '.join([
                    '{0}:{1}'.format(alias, driver) for
                    (alias, driver) in matches
                ])
            )
        )

    alias, driver = matches.pop()
    fun = '{0}.{1}'.format(driver, func)
    if fun not in self.clouds:
        raise SaltCloudSystemExit(
            'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
            'not define the function \'{2}\''.format(alias, driver, func)
        )

    log.debug(
        'Trying to execute \'%s\' with the following kwargs: %s',
        fun, kwargs
    )

    with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])):
        if kwargs:
            result = self.clouds[fun](call='function', kwargs=kwargs)
        else:
            result = self.clouds[fun](call='function')

    return {alias: {driver: result}}
def __filter_non_working_providers(self):
    '''
    Remove any mis-configured cloud providers from the available listing.

    Drops a driver entry when its loader function is missing or when its
    ``get_configured_provider`` check returns False; aliases left with no
    drivers are removed entirely.  Mutates ``self.opts['providers']``.
    '''
    # Iterate over copies so entries can be popped from the originals.
    for alias, drivers in six.iteritems(self.opts['providers'].copy()):
        for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:
                # Mis-configured provider that got removed?
                log.warning(
                    'The cloud driver, \'%s\', configured under the '
                    '\'%s\' cloud provider alias, could not be loaded. '
                    'Please check your provider configuration files and '
                    'ensure all required dependencies are installed '
                    'for the \'%s\' driver.\n'
                    'In rare cases, this could indicate the \'%s()\' '
                    'function could not be found.\nRemoving \'%s\' from '
                    'the available providers list',
                    driver, alias, driver, fun, driver
                )
                self.opts['providers'][alias].pop(driver)

                if alias not in self.opts['providers']:
                    continue

                # Drop the alias once its last driver is gone.
                if not self.opts['providers'][alias]:
                    self.opts['providers'].pop(alias)
                continue

            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                if self.clouds[fun]() is False:
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias is not properly '
                        'configured. Removing it from the available '
                        'providers list.', driver, alias
                    )
                    self.opts['providers'][alias].pop(driver)

        if alias not in self.opts['providers']:
            continue

        # Drop the alias once its last driver is gone.
        if not self.opts['providers'][alias]:
            self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.run_profile
|
python
|
def run_profile(self, profile, names, vm_overrides=None):
'''
Parse over the options passed on the command line and determine how to
handle them
'''
if profile not in self.opts['profiles']:
msg = 'Profile {0} is not defined'.format(profile)
log.error(msg)
return {'Error': msg}
ret = {}
if not vm_overrides:
vm_overrides = {}
try:
with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
main_cloud_config = salt.utils.yaml.safe_load(mcc)
if not main_cloud_config:
main_cloud_config = {}
except KeyError:
main_cloud_config = {}
except IOError:
main_cloud_config = {}
if main_cloud_config is None:
main_cloud_config = {}
mapped_providers = self.map_providers_parallel()
profile_details = self.opts['profiles'][profile]
vms = {}
for prov, val in six.iteritems(mapped_providers):
prov_name = next(iter(val))
for node in mapped_providers[prov][prov_name]:
vms[node] = mapped_providers[prov][prov_name][node]
vms[node]['provider'] = prov
vms[node]['driver'] = prov_name
alias, driver = profile_details['provider'].split(':')
provider_details = self.opts['providers'][alias][driver].copy()
del provider_details['profiles']
for name in names:
if name in vms:
prov = vms[name]['provider']
driv = vms[name]['driver']
msg = '{0} already exists under {1}:{2}'.format(
name, prov, driv
)
log.error(msg)
ret[name] = {'Error': msg}
continue
vm_ = self.vm_config(
name,
main_cloud_config,
provider_details,
profile_details,
vm_overrides,
)
if self.opts['parallel']:
process = multiprocessing.Process(
target=self.create,
args=(vm_,)
)
process.start()
ret[name] = {
'Provisioning': 'VM being provisioned in parallel. '
'PID: {0}'.format(process.pid)
}
continue
try:
# No need to inject __active_provider_name__ into the context
# here because self.create takes care of that
ret[name] = self.create(vm_)
if not ret[name]:
ret[name] = {'Error': 'Failed to deploy VM'}
if len(names) == 1:
raise SaltCloudSystemExit('Failed to deploy VM')
continue
if self.opts.get('show_deploy_args', False) is False:
ret[name].pop('deploy_kwargs', None)
except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
if len(names) == 1:
raise
ret[name] = {'Error': str(exc)}
return ret
|
Parse over the options passed on the command line and determine how to
handle them
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1366-L1453
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def safe_load(stream, Loader=SaltYamlSafeLoader):\n '''\n .. versionadded:: 2018.3.0\n\n Helper function which automagically uses our custom loader.\n '''\n return yaml.load(stream, Loader=Loader)\n",
"def map_providers_parallel(self, query='list_nodes', cached=False):\n '''\n Return a mapping of what named VMs are running on what VM providers\n based on what providers are defined in the configuration and VMs\n\n Same as map_providers but query in parallel.\n '''\n if cached is True and query in self.__cached_provider_queries:\n return self.__cached_provider_queries[query]\n\n opts = self.opts.copy()\n multiprocessing_data = []\n\n # Optimize Providers\n opts['providers'] = self._optimize_providers(opts['providers'])\n for alias, drivers in six.iteritems(opts['providers']):\n # Make temp query for this driver to avoid overwrite next\n this_query = query\n for driver, details in six.iteritems(drivers):\n # If driver has function list_nodes_min, just replace it\n # with query param to check existing vms on this driver\n # for minimum information, Otherwise still use query param.\n if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:\n this_query = 'list_nodes_min'\n\n fun = '{0}.{1}'.format(driver, this_query)\n if fun not in self.clouds:\n log.error('Public cloud provider %s is not available', driver)\n continue\n\n multiprocessing_data.append({\n 'fun': fun,\n 'opts': opts,\n 'query': this_query,\n 'alias': alias,\n 'driver': driver\n })\n output = {}\n if not multiprocessing_data:\n return output\n\n data_count = len(multiprocessing_data)\n pool = multiprocessing.Pool(data_count < 10 and data_count or 10,\n init_pool_worker)\n parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,\n multiprocessing_data,\n pool=pool)\n for alias, driver, details in parallel_pmap:\n if not details:\n # There's no providers details?! Skip it!\n continue\n if alias not in output:\n output[alias] = {}\n output[alias][driver] = details\n\n self.__cached_provider_queries[query] = output\n return output\n",
"def create(self, vm_, local_master=True):\n '''\n Create a single VM\n '''\n output = {}\n\n minion_dict = salt.config.get_cloud_config_value(\n 'minion', vm_, self.opts, default={}\n )\n\n alias, driver = vm_['provider'].split(':')\n fun = '{0}.create'.format(driver)\n if fun not in self.clouds:\n log.error(\n 'Creating \\'%s\\' using \\'%s\\' as the provider '\n 'cannot complete since \\'%s\\' is not available',\n vm_['name'], vm_['provider'], driver\n )\n return\n\n deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)\n make_master = salt.config.get_cloud_config_value(\n 'make_master',\n vm_,\n self.opts\n )\n\n if deploy:\n if not make_master and 'master' not in minion_dict:\n log.warning(\n 'There\\'s no master defined on the \\'%s\\' VM settings.',\n vm_['name']\n )\n\n if 'pub_key' not in vm_ and 'priv_key' not in vm_:\n log.debug('Generating minion keys for \\'%s\\'', vm_['name'])\n priv, pub = salt.utils.cloud.gen_keys(\n salt.config.get_cloud_config_value(\n 'keysize',\n vm_,\n self.opts\n )\n )\n vm_['pub_key'] = pub\n vm_['priv_key'] = priv\n else:\n # Note(pabelanger): We still reference pub_key and priv_key when\n # deploy is disabled.\n vm_['pub_key'] = None\n vm_['priv_key'] = None\n\n key_id = minion_dict.get('id', vm_['name'])\n\n domain = vm_.get('domain')\n if vm_.get('use_fqdn') and domain:\n minion_dict['append_domain'] = domain\n\n if 'append_domain' in minion_dict:\n key_id = '.'.join([key_id, minion_dict['append_domain']])\n\n if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:\n log.debug('Generating the master keys for \\'%s\\'', vm_['name'])\n master_priv, master_pub = salt.utils.cloud.gen_keys(\n salt.config.get_cloud_config_value(\n 'keysize',\n vm_,\n self.opts\n )\n )\n vm_['master_pub'] = master_pub\n vm_['master_pem'] = master_priv\n\n if local_master is True and deploy is True:\n # Accept the key on the local master\n salt.utils.cloud.accept_key(\n self.opts['pki_dir'], vm_['pub_key'], 
key_id\n )\n\n vm_['os'] = salt.config.get_cloud_config_value(\n 'script',\n vm_,\n self.opts\n )\n\n try:\n vm_['inline_script'] = salt.config.get_cloud_config_value(\n 'inline_script',\n vm_,\n self.opts\n )\n except KeyError:\n pass\n\n try:\n alias, driver = vm_['provider'].split(':')\n func = '{0}.create'.format(driver)\n with salt.utils.context.func_globals_inject(\n self.clouds[fun],\n __active_provider_name__=':'.join([alias, driver])\n ):\n output = self.clouds[func](vm_)\n if output is not False and 'sync_after_install' in self.opts:\n if self.opts['sync_after_install'] not in (\n 'all', 'modules', 'states', 'grains'):\n log.error('Bad option for sync_after_install')\n return output\n\n # A small pause helps the sync work more reliably\n time.sleep(3)\n\n start = int(time.time())\n while int(time.time()) < start + 60:\n # We'll try every <timeout> seconds, up to a minute\n mopts_ = salt.config.DEFAULT_MASTER_OPTS\n conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])\n mopts_.update(\n salt.config.master_config(\n os.path.join(conf_path,\n 'master')\n )\n )\n\n client = salt.client.get_local_client(mopts=mopts_)\n\n ret = client.cmd(\n vm_['name'],\n 'saltutil.sync_{0}'.format(self.opts['sync_after_install']),\n timeout=self.opts['timeout']\n )\n if ret:\n log.info(\n six.u('Synchronized the following dynamic modules: '\n ' {0}').format(ret)\n )\n break\n except KeyError as exc:\n log.exception(\n 'Failed to create VM %s. 
Configuration value %s needs '\n 'to be set', vm_['name'], exc\n )\n # If it's a map then we need to respect the 'requires'\n # so we do it later\n try:\n opt_map = self.opts['map']\n except KeyError:\n opt_map = False\n if self.opts['parallel'] and self.opts['start_action'] and not opt_map:\n log.info('Running %s on %s', self.opts['start_action'], vm_['name'])\n client = salt.client.get_local_client(mopts=self.opts)\n action_out = client.cmd(\n vm_['name'],\n self.opts['start_action'],\n timeout=self.opts['timeout'] * 60\n )\n output['ret'] = action_out\n return output\n",
"def vm_config(name, main, provider, profile, overrides):\n '''\n Create vm config.\n\n :param str name: The name of the vm\n :param dict main: The main cloud config\n :param dict provider: The provider config\n :param dict profile: The profile config\n :param dict overrides: The vm's config overrides\n '''\n vm = main.copy()\n vm = salt.utils.dictupdate.update(vm, provider)\n vm = salt.utils.dictupdate.update(vm, profile)\n vm.update(overrides)\n vm['name'] = name\n return vm\n"
] |
class Cloud(object):
    '''
    An object for the creation of new VMs

    Wraps the per-driver cloud modules loaded via ``salt.loader.clouds``
    and exposes provider/profile queries, create/destroy/reboot and
    arbitrary driver actions/functions.
    '''
    def __init__(self, opts):
        # Full salt-cloud configuration (providers, profiles, CLI options).
        self.opts = opts
        # High-level client API reused by some operations.
        self.client = CloudClient(opts=self.opts)
        # Mapping of '<driver>.<function>' -> callable for all loaded drivers.
        self.clouds = salt.loader.clouds(self.opts)
        # Drop providers whose driver failed to load or is misconfigured.
        self.__filter_non_working_providers()
        # Cache for map_providers()/map_providers_parallel() results, by query.
        self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
    def lookup_providers(self, lookup):
        '''
        Return the set of ``(alias, driver)`` tuples matching ``lookup``.

        ``lookup`` may be ``None``/'all' (every configured provider), an
        ``alias:driver`` pair, or a bare alias or driver name.

        :raises SaltCloudSystemExit: when nothing matches.
        '''
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            providers = set()
            for alias, drivers in six.iteritems(self.opts['providers']):
                for driver in drivers:
                    providers.add((alias, driver))
            if not providers:
                raise SaltCloudSystemExit(
                    'There are no cloud providers configured.'
                )
            return providers
        if ':' in lookup:
            # Validate that an exact 'alias:driver' pair is configured.
            alias, driver = lookup.split(':')
            if alias not in self.opts['providers'] or \
                    driver not in self.opts['providers'][alias]:
                raise SaltCloudSystemExit(
                    'No cloud providers matched \'{0}\'. Available: {1}'.format(
                        lookup, ', '.join(self.get_configured_providers())
                    )
                )
        # Bare-name matching: lookup equals either the alias or the driver.
        # NOTE(review): a full 'alias:driver' string never equals a bare
        # alias or driver below, so such lookups fall through to the
        # mismatch error — confirm this is intended.
        providers = set()
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver in drivers:
                if lookup in (alias, driver):
                    providers.add((alias, driver))
        if not providers:
            raise SaltCloudSystemExit(
                'No cloud providers matched \'{0}\'. '
                'Available selections: {1}'.format(
                    lookup, ', '.join(self.get_configured_providers())
                )
            )
        return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return a set of ``(profile_name, provider_name)`` tuples describing
        the configured profiles.

        :param provider: provider name to filter on, or ``None``/'all'.
        :param lookup: profile filter; only ``None``/'all' is handled here.
            NOTE(review): any other value falls through and implicitly
            returns None — confirm callers only ever pass 'all'.
        :raises SaltCloudSystemExit: when no profiles are configured.
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # NOTE(review): assumes the profile's 'provider' value is
                    # 'alias:driver'; a bare name would raise IndexError here.
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        :param query: driver function to run per provider (e.g. 'list_nodes').
        :param cached: when True, reuse a previously cached result for this
            query instead of contacting the providers again.
        :return: ``{alias: {driver: <query output>}}``
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Expose the active provider name to the driver function.
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
    def map_providers_parallel(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs
        Same as map_providers but query in parallel.

        :param query: driver function to run per provider.
        :param cached: when True, reuse a previously cached result.
        :return: ``{alias: {driver: <query output>}}``
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        opts = self.opts.copy()
        multiprocessing_data = []
        # Optimize Providers
        opts['providers'] = self._optimize_providers(opts['providers'])
        for alias, drivers in six.iteritems(opts['providers']):
            # Make temp query for this driver to avoid overwrite next
            this_query = query
            for driver, details in six.iteritems(drivers):
                # If driver has function list_nodes_min, just replace it
                # with query param to check existing vms on this driver
                # for minimum information, Otherwise still use query param.
                if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                    this_query = 'list_nodes_min'
                fun = '{0}.{1}'.format(driver, this_query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                multiprocessing_data.append({
                    'fun': fun,
                    'opts': opts,
                    'query': this_query,
                    'alias': alias,
                    'driver': driver
                })
        output = {}
        if not multiprocessing_data:
            return output
        data_count = len(multiprocessing_data)
        # Cap the worker pool at 10 processes.
        pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                    init_pool_worker)
        parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                       multiprocessing_data,
                                       pool=pool)
        for alias, driver, details in parallel_pmap:
            if not details:
                # There's no providers details?! Skip it!
                continue
            if alias not in output:
                output[alias] = {}
            output[alias][driver] = details
        self.__cached_provider_queries[query] = output
        return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of running VMs whose names appear in ``names``.

        :param names: a single VM name or an iterable of VM names.
        :param query: driver query used to enumerate nodes.
        :param cached: reuse a cached provider query when True.
        :param profile: when given, only match instances from the provider
            configured on that profile.
        :return: ``{alias: {driver: {vm_name: details}}}``
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                # NOTE(review): 'not in' here is a substring test on the
                # profile's provider alias — confirm intended vs '!='.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
    def _optimize_providers(self, providers):
        '''
        Return an optimized mapping of available providers

        Drivers that expose an ``optimize_providers`` function get the
        chance to rewrite their configured provider data; others are
        passed through unchanged.
        '''
        new_providers = {}
        provider_by_driver = {}
        # Regroup the configuration as {driver: {alias: data}}.
        for alias, driver in six.iteritems(providers):
            for name, data in six.iteritems(driver):
                if name not in provider_by_driver:
                    provider_by_driver[name] = {}
                provider_by_driver[name][alias] = data
        for driver, providers_data in six.iteritems(provider_by_driver):
            fun = '{0}.optimize_providers'.format(driver)
            if fun not in self.clouds:
                log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
                # No optimizer for this driver: keep every alias as-is.
                for name, prov_data in six.iteritems(providers_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
                continue
            new_data = self.clouds[fun](providers_data)
            if new_data:
                for name, prov_data in six.iteritems(new_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
        return new_providers
    def location_list(self, lookup='all'):
        '''
        Return a mapping of all location data for available providers

        :param lookup: provider filter passed to ``lookup_providers``;
            'all' (the default) queries every configured provider.
        :return: ``{alias: {driver: <avail_locations output>}}``
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_locations'.format(driver)
            if fun not in self.clouds:
                # The capability to gather locations is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the locations information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
    def size_list(self, lookup='all'):
        '''
        Return a mapping of all size data for available providers

        :param lookup: provider filter passed to ``lookup_providers``;
            'all' (the default) queries every configured provider.
        :return: ``{alias: {driver: <avail_sizes output>}}``
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_sizes'.format(driver)
            if fun not in self.clouds:
                # The capability to gather sizes is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the sizes information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def provider_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def profile_list(self, provider, lookup='all'):
'''
Return a mapping of all configured profiles
'''
data = {}
lookups = self.lookup_profiles(provider, lookup)
if not lookups:
return data
for alias, driver in lookups:
if alias not in data:
data[alias] = {}
if driver not in data[alias]:
data[alias][driver] = {}
return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        Runs each matching driver's ``destroy()`` (optionally in parallel),
        then removes the corresponding minion key(s) from the local master,
        prompting interactively when several globbed keys match one name.

        :param names: iterable of VM names to destroy.
        :param cached: reuse a cached provider query when True.
        :return: ``{alias: {driver: {name: result}}}``, plus a 'Not Found'
            key for names that could not be matched.
        :raises SaltCloudSystemExit: when nothing was found or destroyed.
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })
        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)
            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)
            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)
            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)
        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)
        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                continue
            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            globbed_key_file = glob.glob('{0}.*'.format(key_file))
            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue
            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue
            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue
            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    # NOTE(review): execution continues with the non-int
                    # 'selection'; pop() below then raises and is swallowed,
                    # restarting the prompt — confirm this flow is intended.
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )
                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue
                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break
                print('Did not delete \'{0}\''.format(filename))
                break
        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )
        elif names and processed:
            processed['Not Found'] = names
        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')
        return processed
def reboot(self, names):
'''
Reboot the named VMs
'''
ret = []
pmap = self.map_providers_parallel()
acts = {}
for prov, nodes in six.iteritems(pmap):
acts[prov] = []
for node in nodes:
if node in names:
acts[prov].append(node)
for prov, names_ in six.iteritems(acts):
fun = '{0}.reboot'.format(prov)
for name in names_:
ret.append({
name: self.clouds[fun](name)
})
return ret
def create(self, vm_, local_master=True):
'''
Create a single VM
'''
output = {}
minion_dict = salt.config.get_cloud_config_value(
'minion', vm_, self.opts, default={}
)
alias, driver = vm_['provider'].split(':')
fun = '{0}.create'.format(driver)
if fun not in self.clouds:
log.error(
'Creating \'%s\' using \'%s\' as the provider '
'cannot complete since \'%s\' is not available',
vm_['name'], vm_['provider'], driver
)
return
deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
make_master = salt.config.get_cloud_config_value(
'make_master',
vm_,
self.opts
)
if deploy:
if not make_master and 'master' not in minion_dict:
log.warning(
'There\'s no master defined on the \'%s\' VM settings.',
vm_['name']
)
if 'pub_key' not in vm_ and 'priv_key' not in vm_:
log.debug('Generating minion keys for \'%s\'', vm_['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['pub_key'] = pub
vm_['priv_key'] = priv
else:
# Note(pabelanger): We still reference pub_key and priv_key when
# deploy is disabled.
vm_['pub_key'] = None
vm_['priv_key'] = None
key_id = minion_dict.get('id', vm_['name'])
domain = vm_.get('domain')
if vm_.get('use_fqdn') and domain:
minion_dict['append_domain'] = domain
if 'append_domain' in minion_dict:
key_id = '.'.join([key_id, minion_dict['append_domain']])
if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
log.debug('Generating the master keys for \'%s\'', vm_['name'])
master_priv, master_pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
vm_,
self.opts
)
)
vm_['master_pub'] = master_pub
vm_['master_pem'] = master_priv
if local_master is True and deploy is True:
# Accept the key on the local master
salt.utils.cloud.accept_key(
self.opts['pki_dir'], vm_['pub_key'], key_id
)
vm_['os'] = salt.config.get_cloud_config_value(
'script',
vm_,
self.opts
)
try:
vm_['inline_script'] = salt.config.get_cloud_config_value(
'inline_script',
vm_,
self.opts
)
except KeyError:
pass
try:
alias, driver = vm_['provider'].split(':')
func = '{0}.create'.format(driver)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
output = self.clouds[func](vm_)
if output is not False and 'sync_after_install' in self.opts:
if self.opts['sync_after_install'] not in (
'all', 'modules', 'states', 'grains'):
log.error('Bad option for sync_after_install')
return output
# A small pause helps the sync work more reliably
time.sleep(3)
start = int(time.time())
while int(time.time()) < start + 60:
# We'll try every <timeout> seconds, up to a minute
mopts_ = salt.config.DEFAULT_MASTER_OPTS
conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
mopts_.update(
salt.config.master_config(
os.path.join(conf_path,
'master')
)
)
client = salt.client.get_local_client(mopts=mopts_)
ret = client.cmd(
vm_['name'],
'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
timeout=self.opts['timeout']
)
if ret:
log.info(
six.u('Synchronized the following dynamic modules: '
' {0}').format(ret)
)
break
except KeyError as exc:
log.exception(
'Failed to create VM %s. Configuration value %s needs '
'to be set', vm_['name'], exc
)
# If it's a map then we need to respect the 'requires'
# so we do it later
try:
opt_map = self.opts['map']
except KeyError:
opt_map = False
if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
client = salt.client.get_local_client(mopts=self.opts)
action_out = client.cmd(
vm_['name'],
self.opts['start_action'],
timeout=self.opts['timeout'] * 60
)
output['ret'] = action_out
return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
    def extras(self, extra_):
        '''
        Extra actions

        Run the arbitrary driver function named by ``extra_['action']`` for
        the provider in ``extra_['provider']`` ('alias:driver' format).

        :param dict extra_: must contain 'provider', 'action' and 'name';
            the whole dict is forwarded to the driver as keyword arguments.
        :return: the driver function's output, or None when the function is
            not available.
        '''
        output = {}
        alias, driver = extra_['provider'].split(':')
        fun = '{0}.{1}'.format(driver, extra_['action'])
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                extra_['name'], extra_['provider'], driver
            )
            return
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=extra_['provider']
            ):
                output = self.clouds[fun](**extra_)
        except KeyError as exc:
            log.exception(
                'Failed to perform %s.%s on %s. '
                'Configuration value %s needs to be set',
                extra_['provider'], extra_['action'], extra_['name'], exc
            )
        return output
    def do_action(self, names, kwargs):
        '''
        Perform an action on a VM which may be specific to this cloud provider

        :param names: iterable of VM names to act on.
        :param kwargs: extra keyword arguments forwarded to the driver
            action (dunder-prefixed keys are stripped first).
        :return: ``{alias: {driver: {vm_name: result}}}`` plus bookkeeping
            keys ('Invalid Actions', 'Not Found',
            'Not Actioned/Not Running') where applicable.
        '''
        ret = {}
        invalid_functions = {}
        names = set(names)
        for alias, drivers in six.iteritems(self.map_providers_parallel()):
            if not names:
                break
            for driver, vms in six.iteritems(drivers):
                if not names:
                    break
                valid_function = True
                fun = '{0}.{1}'.format(driver, self.opts['action'])
                if fun not in self.clouds:
                    log.info('\'%s()\' is not available. Not actioning...', fun)
                    valid_function = False
                for vm_name, vm_details in six.iteritems(vms):
                    if not names:
                        break
                    if vm_name not in names:
                        # The VM may still match by its provider-side 'id'.
                        if not isinstance(vm_details, dict):
                            vm_details = {}
                        if 'id' in vm_details and vm_details['id'] in names:
                            vm_name = vm_details['id']
                        else:
                            log.debug(
                                'vm:%s in provider:%s is not in name '
                                'list:\'%s\'', vm_name, driver, names
                            )
                            continue
                    # Build the dictionary of invalid functions with their associated VMs.
                    if valid_function is False:
                        if invalid_functions.get(fun) is None:
                            invalid_functions.update({fun: []})
                        invalid_functions[fun].append(vm_name)
                        continue
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        if alias not in ret:
                            ret[alias] = {}
                        if driver not in ret[alias]:
                            ret[alias][driver] = {}
                        # Clean kwargs of "__pub_*" data before running the cloud action call.
                        # Prevents calling positional "kwarg" arg before "call" when no kwarg
                        # argument is present in the cloud driver function's arg spec.
                        kwargs = salt.utils.args.clean_kwargs(**kwargs)
                        if kwargs:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, kwargs, call='action'
                            )
                        else:
                            ret[alias][driver][vm_name] = self.clouds[fun](
                                vm_name, call='action'
                            )
                        names.remove(vm_name)
        # Set the return information for the VMs listed in the invalid_functions dict.
        missing_vms = set()
        if invalid_functions:
            ret['Invalid Actions'] = invalid_functions
            invalid_func_vms = set()
            for key, val in six.iteritems(invalid_functions):
                invalid_func_vms = invalid_func_vms.union(set(val))
            # Find the VMs that are in names, but not in set of invalid functions.
            missing_vms = names.difference(invalid_func_vms)
            if missing_vms:
                ret['Not Found'] = list(missing_vms)
                ret['Not Actioned/Not Running'] = list(names)
        if not names:
            return ret
        # Don't return missing VM information for invalid functions until after we've had a
        # Chance to return successful actions. If a function is valid for one driver, but
        # Not another, we want to make sure the successful action is returned properly.
        if missing_vms:
            return ret
        # If we reach this point, the Not Actioned and Not Found lists will be the same,
        # But we want to list both for clarity/consistency with the invalid functions lists.
        ret['Not Actioned/Not Running'] = list(names)
        ret['Not Found'] = list(names)
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        :param prov: provider lookup string; must resolve to exactly one
            configured provider.
        :param func: name of the driver-level function to call.
        :param kwargs: keyword arguments forwarded to the function.
        :return: ``{alias: {driver: <function output>}}``
        :raises SaltCloudSystemExit: when the lookup is ambiguous or the
            driver does not define ``func``.
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )
        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )
        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        Drops a driver when it did not load (no ``get_configured_provider``
        in ``self.clouds``) or when its configuration check returns False;
        aliases left with no drivers are removed entirely.
        '''
        # Iterate over copies since entries are popped while iterating.
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    # NOTE(review): popping a driver cannot remove the alias
                    # itself, so this guard looks redundant — confirm.
                    if alias not in self.opts['providers']:
                        continue
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.do_action
|
python
|
def do_action(self, names, kwargs):
'''
Perform an action on a VM which may be specific to this cloud provider
'''
ret = {}
invalid_functions = {}
names = set(names)
for alias, drivers in six.iteritems(self.map_providers_parallel()):
if not names:
break
for driver, vms in six.iteritems(drivers):
if not names:
break
valid_function = True
fun = '{0}.{1}'.format(driver, self.opts['action'])
if fun not in self.clouds:
log.info('\'%s()\' is not available. Not actioning...', fun)
valid_function = False
for vm_name, vm_details in six.iteritems(vms):
if not names:
break
if vm_name not in names:
if not isinstance(vm_details, dict):
vm_details = {}
if 'id' in vm_details and vm_details['id'] in names:
vm_name = vm_details['id']
else:
log.debug(
'vm:%s in provider:%s is not in name '
'list:\'%s\'', vm_name, driver, names
)
continue
# Build the dictionary of invalid functions with their associated VMs.
if valid_function is False:
if invalid_functions.get(fun) is None:
invalid_functions.update({fun: []})
invalid_functions[fun].append(vm_name)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if alias not in ret:
ret[alias] = {}
if driver not in ret[alias]:
ret[alias][driver] = {}
# Clean kwargs of "__pub_*" data before running the cloud action call.
# Prevents calling positional "kwarg" arg before "call" when no kwarg
# argument is present in the cloud driver function's arg spec.
kwargs = salt.utils.args.clean_kwargs(**kwargs)
if kwargs:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, kwargs, call='action'
)
else:
ret[alias][driver][vm_name] = self.clouds[fun](
vm_name, call='action'
)
names.remove(vm_name)
# Set the return information for the VMs listed in the invalid_functions dict.
missing_vms = set()
if invalid_functions:
ret['Invalid Actions'] = invalid_functions
invalid_func_vms = set()
for key, val in six.iteritems(invalid_functions):
invalid_func_vms = invalid_func_vms.union(set(val))
# Find the VMs that are in names, but not in set of invalid functions.
missing_vms = names.difference(invalid_func_vms)
if missing_vms:
ret['Not Found'] = list(missing_vms)
ret['Not Actioned/Not Running'] = list(names)
if not names:
return ret
# Don't return missing VM information for invalid functions until after we've had a
# Chance to return successful actions. If a function is valid for one driver, but
# Not another, we want to make sure the successful action is returned properly.
if missing_vms:
return ret
# If we reach this point, the Not Actioned and Not Found lists will be the same,
# But we want to list both for clarity/consistency with the invalid functions lists.
ret['Not Actioned/Not Running'] = list(names)
ret['Not Found'] = list(names)
return ret
|
Perform an action on a VM which may be specific to this cloud provider
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1455-L1547
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def clean_kwargs(**kwargs):\n '''\n Return a dict without any of the __pub* keys (or any other keys starting\n with a dunder) from the kwargs dict passed into the execution module\n functions. These keys are useful for tracking what was used to invoke\n the function call, but they may not be desirable to have if passing the\n kwargs forward wholesale.\n\n Usage example:\n\n .. code-block:: python\n\n kwargs = __utils__['args.clean_kwargs'](**kwargs)\n '''\n ret = {}\n for key, val in six.iteritems(kwargs):\n if not key.startswith('__'):\n ret[key] = val\n return ret\n",
"def map_providers_parallel(self, query='list_nodes', cached=False):\n '''\n Return a mapping of what named VMs are running on what VM providers\n based on what providers are defined in the configuration and VMs\n\n Same as map_providers but query in parallel.\n '''\n if cached is True and query in self.__cached_provider_queries:\n return self.__cached_provider_queries[query]\n\n opts = self.opts.copy()\n multiprocessing_data = []\n\n # Optimize Providers\n opts['providers'] = self._optimize_providers(opts['providers'])\n for alias, drivers in six.iteritems(opts['providers']):\n # Make temp query for this driver to avoid overwrite next\n this_query = query\n for driver, details in six.iteritems(drivers):\n # If driver has function list_nodes_min, just replace it\n # with query param to check existing vms on this driver\n # for minimum information, Otherwise still use query param.\n if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:\n this_query = 'list_nodes_min'\n\n fun = '{0}.{1}'.format(driver, this_query)\n if fun not in self.clouds:\n log.error('Public cloud provider %s is not available', driver)\n continue\n\n multiprocessing_data.append({\n 'fun': fun,\n 'opts': opts,\n 'query': this_query,\n 'alias': alias,\n 'driver': driver\n })\n output = {}\n if not multiprocessing_data:\n return output\n\n data_count = len(multiprocessing_data)\n pool = multiprocessing.Pool(data_count < 10 and data_count or 10,\n init_pool_worker)\n parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,\n multiprocessing_data,\n pool=pool)\n for alias, driver, details in parallel_pmap:\n if not details:\n # There's no providers details?! Skip it!\n continue\n if alias not in output:\n output[alias] = {}\n output[alias][driver] = details\n\n self.__cached_provider_queries[query] = output\n return output\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
self.opts = opts
self.client = CloudClient(opts=self.opts)
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
    def lookup_profiles(self, provider, lookup):
        '''
        Return the set of ``(profile_alias, provider_name)`` pairs for the
        configured profiles.

        When ``provider`` is anything other than ``'all'``, only profiles
        whose provider (by configured alias or by driver name) matches it are
        returned; otherwise every profile is returned.

        NOTE(review): only ``lookup in (None, 'all')`` is handled; any other
        lookup value falls off the end and implicitly returns ``None`` --
        confirm callers never pass a specific lookup.
        '''
        if provider is None:
            provider = 'all'
        if lookup is None:
            lookup = 'all'
        if lookup == 'all':
            profiles = set()
            provider_profiles = set()
            for alias, info in six.iteritems(self.opts['profiles']):
                providers = info.get('provider')
                if providers:
                    # assumes the profile's 'provider' value is normalized to
                    # 'alias:driver' form -- an unqualified value would raise
                    # IndexError below; TODO confirm upstream normalization
                    given_prov_name = providers.split(':')[0]
                    salt_prov_name = providers.split(':')[1]
                    if given_prov_name == provider:
                        provider_profiles.add((alias, given_prov_name))
                    elif salt_prov_name == provider:
                        provider_profiles.add((alias, salt_prov_name))
                    profiles.add((alias, given_prov_name))
            if not profiles:
                raise SaltCloudSystemExit(
                    'There are no cloud profiles configured.'
                )
            if provider != 'all':
                return provider_profiles
            return profiles
    def map_providers(self, query='list_nodes', cached=False):
        '''
        Return a mapping of what named VMs are running on what VM providers
        based on what providers are defined in the configuration and VMs

        The result is shaped ``{alias: {driver: <query result>}}`` and is
        cached per query name; pass ``cached=True`` to reuse a previous
        result for the same query.
        '''
        if cached is True and query in self.__cached_provider_queries:
            return self.__cached_provider_queries[query]
        pmap = {}
        for alias, drivers in six.iteritems(self.opts['providers']):
            for driver, details in six.iteritems(drivers):
                fun = '{0}.{1}'.format(driver, query)
                if fun not in self.clouds:
                    log.error('Public cloud provider %s is not available', driver)
                    continue
                if alias not in pmap:
                    pmap[alias] = {}
                try:
                    # Run the driver query with __active_provider_name__
                    # injected so the driver knows which alias invoked it.
                    with salt.utils.context.func_globals_inject(
                        self.clouds[fun],
                        __active_provider_name__=':'.join([alias, driver])
                    ):
                        pmap[alias][driver] = self.clouds[fun]()
                except Exception as err:
                    log.debug(
                        'Failed to execute \'%s()\' while querying for '
                        'running nodes: %s', fun, err,
                        exc_info_on_loglevel=logging.DEBUG
                    )
                    # Failed to communicate with the provider, don't list any
                    # nodes
                    pmap[alias][driver] = []
        self.__cached_provider_queries[query] = pmap
        return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts['providers'] = self._optimize_providers(opts['providers'])
for alias, drivers in six.iteritems(opts['providers']):
# Make temp query for this driver to avoid overwrite next
this_query = query
for driver, details in six.iteritems(drivers):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
multiprocessing_data.append({
'fun': fun,
'opts': opts,
'query': this_query,
'alias': alias,
'driver': driver
})
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
init_pool_worker)
parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
multiprocessing_data,
pool=pool)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
    def get_running_by_names(self, names, query='list_nodes', cached=False,
                             profile=None):
        '''
        Return the subset of running VMs whose names appear in ``names``,
        shaped ``{alias: {driver: {vm_name: details}}}``.

        ``names`` may be a single name or an iterable of names. When
        ``profile`` is given, only instances from that profile's provider
        alias are returned, which avoids duplicate hits when several aliases
        share one account/region.
        '''
        if isinstance(names, six.string_types):
            names = [names]
        matches = {}
        # Remembers the first alias seen for each driver; used below to
        # de-duplicate instances reported by both the 'ec2' and legacy
        # 'aws' drivers.
        handled_drivers = {}
        mapped_providers = self.map_providers_parallel(query, cached=cached)
        for alias, drivers in six.iteritems(mapped_providers):
            for driver, vms in six.iteritems(drivers):
                if driver not in handled_drivers:
                    handled_drivers[driver] = alias
                # When a profile is specified, only return an instance
                # that matches the provider specified in the profile.
                # This solves the issues when many providers return the
                # same instance. For example there may be one provider for
                # each availability zone in amazon in the same region, but
                # the search returns the same instance for each provider
                # because amazon returns all instances in a region, not
                # availability zone.
                if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                    continue
                for vm_name, details in six.iteritems(vms):
                    # XXX: The logic below can be removed once the aws driver
                    # is removed
                    if vm_name not in names:
                        continue
                    elif driver == 'ec2' and 'aws' in handled_drivers and \
                            'aws' in matches[handled_drivers['aws']] and \
                            vm_name in matches[handled_drivers['aws']]['aws']:
                        continue
                    elif driver == 'aws' and 'ec2' in handled_drivers and \
                            'ec2' in matches[handled_drivers['ec2']] and \
                            vm_name in matches[handled_drivers['ec2']]['ec2']:
                        continue
                    if alias not in matches:
                        matches[alias] = {}
                    if driver not in matches[alias]:
                        matches[alias][driver] = {}
                    matches[alias][driver][vm_name] = details
        return matches
    def _optimize_providers(self, providers):
        '''
        Return an optimized mapping of available providers

        Regroups the ``{alias: {driver: data}}`` input by driver and, for
        drivers exposing an ``optimize_providers`` function, lets the driver
        collapse/rewrite its aliases (e.g. merging aliases that hit the same
        account). Drivers without that function are passed through unchanged.
        '''
        new_providers = {}
        # Invert the mapping: driver name -> {alias: provider data}.
        provider_by_driver = {}
        for alias, driver in six.iteritems(providers):
            for name, data in six.iteritems(driver):
                if name not in provider_by_driver:
                    provider_by_driver[name] = {}
                provider_by_driver[name][alias] = data
        for driver, providers_data in six.iteritems(provider_by_driver):
            fun = '{0}.optimize_providers'.format(driver)
            if fun not in self.clouds:
                log.debug('The \'%s\' cloud driver is unable to be optimized.', driver)
                # No optimizer: copy the aliases through untouched.
                for name, prov_data in six.iteritems(providers_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
                continue
            new_data = self.clouds[fun](providers_data)
            if new_data:
                for name, prov_data in six.iteritems(new_data):
                    if name not in new_providers:
                        new_providers[name] = {}
                    new_providers[name][driver] = prov_data
        return new_providers
    def location_list(self, lookup='all'):
        '''
        Return a mapping of all location data for available providers

        The result is shaped ``{alias: {driver: <avail_locations() result>}}``
        for every provider matched by ``lookup`` whose driver implements
        ``avail_locations``.
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_locations'.format(driver)
            if fun not in self.clouds:
                # The capability to gather locations is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the locations information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                # Best-effort: log the failure and keep collecting the rest.
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
def image_list(self, lookup='all'):
'''
Return a mapping of all image data for available providers
'''
data = {}
lookups = self.lookup_providers(lookup)
if not lookups:
return data
for alias, driver in lookups:
fun = '{0}.avail_images'.format(driver)
if fun not in self.clouds:
# The capability to gather images is not supported by this
# cloud module
log.debug(
'The \'%s\' cloud driver defined under \'%s\' provider '
'alias is unable to get the images information',
driver, alias
)
continue
if alias not in data:
data[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
data[alias][driver] = self.clouds[fun]()
except Exception as err:
log.error(
'Failed to get the output of \'%s()\': %s',
fun, err, exc_info_on_loglevel=logging.DEBUG
)
return data
    def size_list(self, lookup='all'):
        '''
        Return a mapping of all size data for available providers

        The result is shaped ``{alias: {driver: <avail_sizes() result>}}``
        for every provider matched by ``lookup`` whose driver implements
        ``avail_sizes``.
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            fun = '{0}.avail_sizes'.format(driver)
            if fun not in self.clouds:
                # The capability to gather sizes is not supported by this
                # cloud module
                log.debug(
                    'The \'%s\' cloud driver defined under \'%s\' provider '
                    'alias is unable to get the sizes information',
                    driver, alias
                )
                continue
            if alias not in data:
                data[alias] = {}
            try:
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    data[alias][driver] = self.clouds[fun]()
            except Exception as err:
                # Best-effort: log the failure and keep collecting the rest.
                log.error(
                    'Failed to get the output of \'%s()\': %s',
                    fun, err, exc_info_on_loglevel=logging.DEBUG
                )
        return data
    def provider_list(self, lookup='all'):
        '''
        Return the alias/driver structure of the matched providers.

        The result is shaped ``{alias: {driver: {}}}``; the inner dicts are
        intentionally empty -- only the structure is reported.
        '''
        data = {}
        lookups = self.lookup_providers(lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            if alias not in data:
                data[alias] = {}
            if driver not in data[alias]:
                data[alias][driver] = {}
        return data
    def profile_list(self, provider, lookup='all'):
        '''
        Return a mapping of all configured profiles

        The result is shaped ``{profile_alias: {provider_name: {}}}``; the
        inner dicts are intentionally empty -- only the structure is
        reported.
        '''
        data = {}
        lookups = self.lookup_profiles(provider, lookup)
        if not lookups:
            return data
        for alias, driver in lookups:
            if alias not in data:
                data[alias] = {}
            if driver not in data[alias]:
                data[alias][driver] = {}
        return data
def create_all(self):
'''
Create/Verify the VMs in the VM data
'''
ret = []
for vm_name, vm_details in six.iteritems(self.opts['profiles']):
ret.append(
{vm_name: self.create(vm_details)}
)
return ret
    def destroy(self, names, cached=False):
        '''
        Destroy the named VMs

        Resolves ``names`` against the running nodes, destroys the matches
        (in parallel when ``self.opts['parallel']`` is set), then removes
        the corresponding minion keys from the local master's pki directory.
        Returns ``{alias: {driver: {name: result}}}``; names that could not
        be found are reported under a 'Not Found' key or raise
        SaltCloudSystemExit when nothing matched at all.
        '''
        processed = {}
        names = set(names)
        matching = self.get_running_by_names(names, cached=cached)
        vms_to_destroy = set()
        parallel_data = []
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for name in vms:
                    if name in names:
                        vms_to_destroy.add((alias, driver, name))
                        if self.opts['parallel']:
                            parallel_data.append({
                                'opts': self.opts,
                                'name': name,
                                'alias': alias,
                                'driver': driver,
                            })
        # destroying in parallel
        if self.opts['parallel'] and parallel_data:
            # set the pool size based on configuration or default to
            # the number of machines we're destroying
            if 'pool_size' in self.opts:
                pool_size = self.opts['pool_size']
            else:
                pool_size = len(parallel_data)
            log.info('Destroying in parallel mode; '
                     'Cloud pool size: %s', pool_size)
            # kick off the parallel destroy
            output_multip = enter_mainloop(
                _destroy_multiprocessing, parallel_data, pool_size=pool_size)
            # massage the multiprocessing output a bit
            ret_multip = {}
            for obj in output_multip:
                ret_multip.update(obj)
            # build up a data structure similar to what the non-parallel
            # destroy uses
            for obj in parallel_data:
                alias = obj['alias']
                driver = obj['driver']
                name = obj['name']
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret_multip[name]
                if name in names:
                    names.remove(name)
        # not destroying in parallel
        else:
            log.info('Destroying in non-parallel mode.')
            for alias, driver, name in vms_to_destroy:
                fun = '{0}.destroy'.format(driver)
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    ret = self.clouds[fun](name)
                if alias not in processed:
                    processed[alias] = {}
                if driver not in processed[alias]:
                    processed[alias][driver] = {}
                processed[alias][driver][name] = ret
                if name in names:
                    names.remove(name)
        # now the processed data structure contains the output from either
        # the parallel or non-parallel destroy and we should finish up
        # with removing minion keys if necessary
        for alias, driver, name in vms_to_destroy:
            ret = processed[alias][driver][name]
            if not ret:
                continue
            vm_ = {
                'name': name,
                'profile': None,
                'provider': ':'.join([alias, driver]),
                'driver': driver
            }
            minion_dict = salt.config.get_cloud_config_value(
                'minion', vm_, self.opts, default={}
            )
            key_file = os.path.join(
                self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
            )
            # Keys created with append_domain end up as '<name>.<domain>';
            # glob catches those variants.
            globbed_key_file = glob.glob('{0}.*'.format(key_file))
            if not os.path.isfile(key_file) and not globbed_key_file:
                # There's no such key file!? It might have been renamed
                if isinstance(ret, dict) and 'newname' in ret:
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], ret['newname']
                    )
                continue
            if os.path.isfile(key_file) and not globbed_key_file:
                # Single key entry. Remove it!
                salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
                continue
            # Since we have globbed matches, there are probably some keys for which their minion
            # configuration has append_domain set.
            if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
                # Single entry, let's remove it!
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'],
                    os.path.basename(globbed_key_file[0])
                )
                continue
            # Since we can't get the profile or map entry used to create
            # the VM, we can't also get the append_domain setting.
            # And if we reached this point, we have several minion keys
            # who's name starts with the machine name we're deleting.
            # We need to ask one by one!?
            print(
                'There are several minion keys who\'s name starts '
                'with \'{0}\'. We need to ask you which one should be '
                'deleted:'.format(
                    name
                )
            )
            # Interactive selection loop: prompt until one key is deleted
            # or the user declines the chosen key.
            while True:
                for idx, filename in enumerate(globbed_key_file):
                    print(' {0}: {1}'.format(
                        idx, os.path.basename(filename)
                    ))
                selection = input(
                    'Which minion key should be deleted(number)? '
                )
                try:
                    selection = int(selection)
                except ValueError:
                    print(
                        '\'{0}\' is not a valid selection.'.format(selection)
                    )
                try:
                    filename = os.path.basename(
                        globbed_key_file.pop(selection)
                    )
                except Exception:
                    continue
                delete = input(
                    'Delete \'{0}\'? [Y/n]? '.format(filename)
                )
                if delete == '' or delete.lower().startswith('y'):
                    salt.utils.cloud.remove_key(
                        self.opts['pki_dir'], filename
                    )
                    print('Deleted \'{0}\''.format(filename))
                    break
                print('Did not delete \'{0}\''.format(filename))
                break
        if names and not processed:
            # These machines were asked to be destroyed but could not be found
            raise SaltCloudSystemExit(
                'The following VM\'s were not found: {0}'.format(
                    ', '.join(names)
                )
            )
        elif names and processed:
            processed['Not Found'] = names
        elif not processed:
            raise SaltCloudSystemExit('No machines were destroyed!')
        return processed
    def reboot(self, names):
        '''
        Reboot the named VMs

        Returns a list of ``{name: <driver reboot result>}`` dicts.
        '''
        # NOTE(review): map_providers_parallel() returns a nested
        # {alias: {driver: {vm_name: details}}} structure, but the loops
        # below iterate it one level shallow (treating the drivers dict as
        # the node list) and build the reboot function name from the alias
        # rather than the driver. This looks like it predates the
        # alias/driver split -- confirm against callers before relying on
        # this method.
        ret = []
        pmap = self.map_providers_parallel()
        acts = {}
        for prov, nodes in six.iteritems(pmap):
            acts[prov] = []
            for node in nodes:
                if node in names:
                    acts[prov].append(node)
        for prov, names_ in six.iteritems(acts):
            fun = '{0}.reboot'.format(prov)
            for name in names_:
                ret.append({
                    name: self.clouds[fun](name)
                })
        return ret
    def create(self, vm_, local_master=True):
        '''
        Create a single VM

        ``vm_`` is the merged per-VM configuration (see ``vm_config``); it
        must contain 'name' and a 'provider' in 'alias:driver' form. When
        deploy is enabled, minion (and optionally master) keys are generated
        and, with ``local_master=True``, pre-accepted on the local master.
        Returns the driver's create() output (possibly augmented with a
        'ret' key when a start_action runs), or None if the driver has no
        create function.
        '''
        output = {}
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        alias, driver = vm_['provider'].split(':')
        fun = '{0}.create'.format(driver)
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                vm_['name'], vm_['provider'], driver
            )
            return
        deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
        make_master = salt.config.get_cloud_config_value(
            'make_master',
            vm_,
            self.opts
        )
        if deploy:
            if not make_master and 'master' not in minion_dict:
                log.warning(
                    'There\'s no master defined on the \'%s\' VM settings.',
                    vm_['name']
                )
            # Generate a fresh minion keypair unless one was supplied.
            if 'pub_key' not in vm_ and 'priv_key' not in vm_:
                log.debug('Generating minion keys for \'%s\'', vm_['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        vm_,
                        self.opts
                    )
                )
                vm_['pub_key'] = pub
                vm_['priv_key'] = priv
        else:
            # Note(pabelanger): We still reference pub_key and priv_key when
            # deploy is disabled.
            vm_['pub_key'] = None
            vm_['priv_key'] = None
        # The minion key id defaults to the VM name, optionally suffixed
        # with a domain (use_fqdn/append_domain).
        key_id = minion_dict.get('id', vm_['name'])
        domain = vm_.get('domain')
        if vm_.get('use_fqdn') and domain:
            minion_dict['append_domain'] = domain
        if 'append_domain' in minion_dict:
            key_id = '.'.join([key_id, minion_dict['append_domain']])
        if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
            log.debug('Generating the master keys for \'%s\'', vm_['name'])
            master_priv, master_pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['master_pub'] = master_pub
            vm_['master_pem'] = master_priv
        if local_master is True and deploy is True:
            # Accept the key on the local master
            salt.utils.cloud.accept_key(
                self.opts['pki_dir'], vm_['pub_key'], key_id
            )
        vm_['os'] = salt.config.get_cloud_config_value(
            'script',
            vm_,
            self.opts
        )
        try:
            vm_['inline_script'] = salt.config.get_cloud_config_value(
                'inline_script',
                vm_,
                self.opts
            )
        except KeyError:
            pass
        try:
            alias, driver = vm_['provider'].split(':')
            func = '{0}.create'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                output = self.clouds[func](vm_)
            if output is not False and 'sync_after_install' in self.opts:
                if self.opts['sync_after_install'] not in (
                        'all', 'modules', 'states', 'grains'):
                    log.error('Bad option for sync_after_install')
                    return output
                # A small pause helps the sync work more reliably
                time.sleep(3)
                start = int(time.time())
                # Retry the sync for up to a minute while the new minion
                # comes up and connects to the master.
                while int(time.time()) < start + 60:
                    # We'll try every <timeout> seconds, up to a minute
                    mopts_ = salt.config.DEFAULT_MASTER_OPTS
                    conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                    mopts_.update(
                        salt.config.master_config(
                            os.path.join(conf_path,
                                         'master')
                        )
                    )
                    client = salt.client.get_local_client(mopts=mopts_)
                    ret = client.cmd(
                        vm_['name'],
                        'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                        timeout=self.opts['timeout']
                    )
                    if ret:
                        log.info(
                            six.u('Synchronized the following dynamic modules: '
                                  ' {0}').format(ret)
                        )
                        break
        except KeyError as exc:
            log.exception(
                'Failed to create VM %s. Configuration value %s needs '
                'to be set', vm_['name'], exc
            )
        # If it's a map then we need to respect the 'requires'
        # so we do it later
        try:
            opt_map = self.opts['map']
        except KeyError:
            opt_map = False
        if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
            log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
            client = salt.client.get_local_client(mopts=self.opts)
            action_out = client.cmd(
                vm_['name'],
                self.opts['start_action'],
                timeout=self.opts['timeout'] * 60
            )
            output['ret'] = action_out
        return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
'''
Create vm config.
:param str name: The name of the vm
:param dict main: The main cloud config
:param dict provider: The provider config
:param dict profile: The profile config
:param dict overrides: The vm's config overrides
'''
vm = main.copy()
vm = salt.utils.dictupdate.update(vm, provider)
vm = salt.utils.dictupdate.update(vm, profile)
vm.update(overrides)
vm['name'] = name
return vm
    def extras(self, extra_):
        '''
        Extra actions

        Dispatches ``extra_['action']`` to the driver named in
        ``extra_['provider']`` ('alias:driver'), passing the whole
        ``extra_`` dict as keyword arguments. Returns the driver's output,
        ``{}`` on a KeyError, or None if the function does not exist.
        '''
        output = {}
        alias, driver = extra_['provider'].split(':')
        fun = '{0}.{1}'.format(driver, extra_['action'])
        if fun not in self.clouds:
            log.error(
                'Creating \'%s\' using \'%s\' as the provider '
                'cannot complete since \'%s\' is not available',
                extra_['name'], extra_['provider'], driver
            )
            return
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=extra_['provider']
            ):
                output = self.clouds[fun](**extra_)
        except KeyError as exc:
            log.exception(
                'Failed to perform %s.%s on %s. '
                'Configuration value %s needs to be set',
                extra_['provider'], extra_['action'], extra_['name'], exc
            )
        return output
    def run_profile(self, profile, names, vm_overrides=None):
        '''
        Parse over the options passed on the command line and determine how to
        handle them

        Creates one VM per entry in ``names`` from the named profile,
        skipping names that already exist on any provider. Returns
        ``{name: result-or-error-dict}``; with ``self.opts['parallel']`` set,
        each create runs in its own process and the result only reports the
        spawned PID.
        '''
        if profile not in self.opts['profiles']:
            msg = 'Profile {0} is not defined'.format(profile)
            log.error(msg)
            return {'Error': msg}
        ret = {}
        if not vm_overrides:
            vm_overrides = {}
        # Re-read the main cloud config file so vm_config() can layer it
        # under the provider/profile data; fall back to {} on any problem.
        try:
            with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
                main_cloud_config = salt.utils.yaml.safe_load(mcc)
            if not main_cloud_config:
                main_cloud_config = {}
        except KeyError:
            main_cloud_config = {}
        except IOError:
            main_cloud_config = {}
        if main_cloud_config is None:
            main_cloud_config = {}
        mapped_providers = self.map_providers_parallel()
        profile_details = self.opts['profiles'][profile]
        # Flatten the provider map into {vm_name: details} to detect
        # already-existing machines by name.
        vms = {}
        for prov, val in six.iteritems(mapped_providers):
            prov_name = next(iter(val))
            for node in mapped_providers[prov][prov_name]:
                vms[node] = mapped_providers[prov][prov_name][node]
                vms[node]['provider'] = prov
                vms[node]['driver'] = prov_name
        alias, driver = profile_details['provider'].split(':')
        provider_details = self.opts['providers'][alias][driver].copy()
        del provider_details['profiles']
        for name in names:
            if name in vms:
                prov = vms[name]['provider']
                driv = vms[name]['driver']
                msg = '{0} already exists under {1}:{2}'.format(
                    name, prov, driv
                )
                log.error(msg)
                ret[name] = {'Error': msg}
                continue
            vm_ = self.vm_config(
                name,
                main_cloud_config,
                provider_details,
                profile_details,
                vm_overrides,
            )
            if self.opts['parallel']:
                process = multiprocessing.Process(
                    target=self.create,
                    args=(vm_,)
                )
                process.start()
                ret[name] = {
                    'Provisioning': 'VM being provisioned in parallel. '
                                    'PID: {0}'.format(process.pid)
                }
                continue
            try:
                # No need to inject __active_provider_name__ into the context
                # here because self.create takes care of that
                ret[name] = self.create(vm_)
                if not ret[name]:
                    ret[name] = {'Error': 'Failed to deploy VM'}
                    if len(names) == 1:
                        raise SaltCloudSystemExit('Failed to deploy VM')
                    continue
                if self.opts.get('show_deploy_args', False) is False:
                    ret[name].pop('deploy_kwargs', None)
            except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
                # With a single requested VM, propagate; otherwise record
                # the error and keep creating the remaining VMs.
                if len(names) == 1:
                    raise
                ret[name] = {'Error': str(exc)}
        return ret
    def do_function(self, prov, func, kwargs):
        '''
        Perform a function against a cloud provider

        Resolves ``prov`` to exactly one (alias, driver) pair and calls the
        driver's ``func`` with ``call='function'``. Returns
        ``{alias: {driver: result}}``; raises SaltCloudSystemExit when the
        lookup is ambiguous or the function does not exist.
        '''
        matches = self.lookup_providers(prov)
        if len(matches) > 1:
            raise SaltCloudSystemExit(
                'More than one results matched \'{0}\'. Please specify '
                'one of: {1}'.format(
                    prov,
                    ', '.join([
                        '{0}:{1}'.format(alias, driver) for
                        (alias, driver) in matches
                    ])
                )
            )
        alias, driver = matches.pop()
        fun = '{0}.{1}'.format(driver, func)
        if fun not in self.clouds:
            raise SaltCloudSystemExit(
                'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
                'not define the function \'{2}\''.format(alias, driver, func)
            )
        log.debug(
            'Trying to execute \'%s\' with the following kwargs: %s',
            fun, kwargs
        )
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            if kwargs:
                return {
                    alias: {
                        driver: self.clouds[fun](
                            call='function', kwargs=kwargs
                        )
                    }
                }
            return {
                alias: {
                    driver: self.clouds[fun](call='function')
                }
            }
    def __filter_non_working_providers(self):
        '''
        Remove any mis-configured cloud providers from the available listing

        A driver is dropped when its module could not be loaded (no
        ``get_configured_provider`` function) or when that function returns
        False for the alias. Aliases left with no drivers are removed too.
        Iterates over copies so ``self.opts['providers']`` can be mutated
        in place.
        '''
        for alias, drivers in six.iteritems(self.opts['providers'].copy()):
            for driver in drivers.copy():
                fun = '{0}.get_configured_provider'.format(driver)
                if fun not in self.clouds:
                    # Mis-configured provider that got removed?
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias, could not be loaded. '
                        'Please check your provider configuration files and '
                        'ensure all required dependencies are installed '
                        'for the \'%s\' driver.\n'
                        'In rare cases, this could indicate the \'%s()\' '
                        'function could not be found.\nRemoving \'%s\' from '
                        'the available providers list',
                        driver, alias, driver, fun, driver
                    )
                    self.opts['providers'][alias].pop(driver)
                    if alias not in self.opts['providers']:
                        continue
                    if not self.opts['providers'][alias]:
                        self.opts['providers'].pop(alias)
                    continue
                # Ask the driver whether this alias is properly configured,
                # with __active_provider_name__ injected for context.
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if self.clouds[fun]() is False:
                        log.warning(
                            'The cloud driver, \'%s\', configured under the '
                            '\'%s\' cloud provider alias is not properly '
                            'configured. Removing it from the available '
                            'providers list.', driver, alias
                        )
                        self.opts['providers'][alias].pop(driver)
            # Drop the alias entirely if every driver under it was removed.
            if alias not in self.opts['providers']:
                continue
            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.do_function
|
python
|
def do_function(self, prov, func, kwargs):
'''
Perform a function against a cloud provider
'''
matches = self.lookup_providers(prov)
if len(matches) > 1:
raise SaltCloudSystemExit(
'More than one results matched \'{0}\'. Please specify '
'one of: {1}'.format(
prov,
', '.join([
'{0}:{1}'.format(alias, driver) for
(alias, driver) in matches
])
)
)
alias, driver = matches.pop()
fun = '{0}.{1}'.format(driver, func)
if fun not in self.clouds:
raise SaltCloudSystemExit(
'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
'not define the function \'{2}\''.format(alias, driver, func)
)
log.debug(
'Trying to execute \'%s\' with the following kwargs: %s',
fun, kwargs
)
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if kwargs:
return {
alias: {
driver: self.clouds[fun](
call='function', kwargs=kwargs
)
}
}
return {
alias: {
driver: self.clouds[fun](call='function')
}
}
|
Perform a function against a cloud provider
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1549-L1595
|
[
"def lookup_providers(self, lookup):\n '''\n Get a dict describing the configured providers\n '''\n if lookup is None:\n lookup = 'all'\n if lookup == 'all':\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'There are no cloud providers configured.'\n )\n\n return providers\n\n if ':' in lookup:\n alias, driver = lookup.split(':')\n if alias not in self.opts['providers'] or \\\n driver not in self.opts['providers'][alias]:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. Available: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n\n providers = set()\n for alias, drivers in six.iteritems(self.opts['providers']):\n for driver in drivers:\n if lookup in (alias, driver):\n providers.add((alias, driver))\n\n if not providers:\n raise SaltCloudSystemExit(\n 'No cloud providers matched \\'{0}\\'. '\n 'Available selections: {1}'.format(\n lookup, ', '.join(self.get_configured_providers())\n )\n )\n return providers\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
self.opts = opts
self.client = CloudClient(opts=self.opts)
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
    '''
    Return a mapping of what named VMs are running on what VM providers
    based on what providers are defined in the configuration and VMs

    Same as map_providers but query in parallel.

    :param query: name of the driver function to call per provider
    :param cached: if True, reuse the cached result for this query
    :return: ``{alias: {driver: details}}``
    '''
    # Serve a previously computed answer when the caller allows it.
    if cached is True and query in self.__cached_provider_queries:
        return self.__cached_provider_queries[query]

    opts = self.opts.copy()
    multiprocessing_data = []

    # Optimize Providers
    opts['providers'] = self._optimize_providers(opts['providers'])
    for alias, drivers in six.iteritems(opts['providers']):
        # Make temp query for this driver to avoid overwrite next
        this_query = query
        for driver, details in six.iteritems(drivers):
            # If driver has function list_nodes_min, just replace it
            # with query param to check existing vms on this driver
            # for minimum information, Otherwise still use query param.
            if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
                this_query = 'list_nodes_min'

            fun = '{0}.{1}'.format(driver, this_query)
            if fun not in self.clouds:
                log.error('Public cloud provider %s is not available', driver)
                continue

            # One work item per (alias, driver) pair for the worker pool.
            multiprocessing_data.append({
                'fun': fun,
                'opts': opts,
                'query': this_query,
                'alias': alias,
                'driver': driver
            })
    output = {}
    if not multiprocessing_data:
        return output
    # Cap the pool at 10 workers regardless of how many queries there are.
    data_count = len(multiprocessing_data)
    pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
                                init_pool_worker)
    parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
                                   multiprocessing_data,
                                   pool=pool)
    for alias, driver, details in parallel_pmap:
        if not details:
            # There's no providers details?! Skip it!
            continue
        if alias not in output:
            output[alias] = {}
        output[alias][driver] = details

    self.__cached_provider_queries[query] = output
    return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
                         profile=None):
    '''
    Return the subset of currently reported VMs whose names are in
    ``names``, as ``{alias: {driver: {vm_name: details}}}``.

    :param names: a VM name or iterable of VM names to look for
    :param query: provider query to run (passed to map_providers_parallel)
    :param cached: if True, allow a cached provider query to be reused
    :param profile: when set, only match VMs from the provider alias
        configured on that profile
    '''
    if isinstance(names, six.string_types):
        names = [names]

    matches = {}
    # Remembers the first alias seen per driver; used by the aws/ec2
    # de-duplication below.
    handled_drivers = {}
    mapped_providers = self.map_providers_parallel(query, cached=cached)
    for alias, drivers in six.iteritems(mapped_providers):
        for driver, vms in six.iteritems(drivers):
            if driver not in handled_drivers:
                handled_drivers[driver] = alias
            # When a profile is specified, only return an instance
            # that matches the provider specified in the profile.
            # This solves the issues when many providers return the
            # same instance. For example there may be one provider for
            # each availability zone in amazon in the same region, but
            # the search returns the same instance for each provider
            # because amazon returns all instances in a region, not
            # availability zone.
            if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
                continue

            for vm_name, details in six.iteritems(vms):
                # XXX: The logic below can be removed once the aws driver
                # is removed
                if vm_name not in names:
                    continue

                # Skip a VM already matched under the legacy aws driver
                # (or vice versa) so it is not reported twice.
                elif driver == 'ec2' and 'aws' in handled_drivers and \
                        'aws' in matches[handled_drivers['aws']] and \
                        vm_name in matches[handled_drivers['aws']]['aws']:
                    continue
                elif driver == 'aws' and 'ec2' in handled_drivers and \
                        'ec2' in matches[handled_drivers['ec2']] and \
                        vm_name in matches[handled_drivers['ec2']]['ec2']:
                    continue

                if alias not in matches:
                    matches[alias] = {}
                if driver not in matches[alias]:
                    matches[alias][driver] = {}

                matches[alias][driver][vm_name] = details

    return matches
def _optimize_providers(self, providers):
    '''
    Return an optimized mapping of available providers
    '''
    optimized = {}
    by_driver = {}

    # Regroup the alias -> driver mapping into driver -> alias buckets so
    # each driver's optimizer can see all of its configured providers.
    for alias, drivers in six.iteritems(providers):
        for drv_name, drv_data in six.iteritems(drivers):
            by_driver.setdefault(drv_name, {})[alias] = drv_data

    for drv_name, grouped in six.iteritems(by_driver):
        fun = '{0}.optimize_providers'.format(drv_name)
        if fun not in self.clouds:
            log.debug('The \'%s\' cloud driver is unable to be optimized.', drv_name)
            # No optimizer for this driver; pass the data straight through.
            for alias, prov_data in six.iteritems(grouped):
                optimized.setdefault(alias, {})[drv_name] = prov_data
            continue

        optimized_data = self.clouds[fun](grouped)
        if optimized_data:
            for alias, prov_data in six.iteritems(optimized_data):
                optimized.setdefault(alias, {})[drv_name] = prov_data

    return optimized
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers
    '''
    locations = {}

    providers = self.lookup_providers(lookup)
    if not providers:
        return locations

    for prov_alias, prov_driver in providers:
        fun = '{0}.avail_locations'.format(prov_driver)
        if fun not in self.clouds:
            # The capability to gather locations is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the locations information',
                prov_driver, prov_alias
            )
            continue

        locations.setdefault(prov_alias, {})
        try:
            # Expose the active provider name to the driver while it runs.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([prov_alias, prov_driver])
            ):
                locations[prov_alias][prov_driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return locations
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers
    '''
    images = {}

    providers = self.lookup_providers(lookup)
    if not providers:
        return images

    for prov_alias, prov_driver in providers:
        fun = '{0}.avail_images'.format(prov_driver)
        if fun not in self.clouds:
            # The capability to gather images is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the images information',
                prov_driver, prov_alias
            )
            continue

        images.setdefault(prov_alias, {})
        try:
            # Expose the active provider name to the driver while it runs.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([prov_alias, prov_driver])
            ):
                images[prov_alias][prov_driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return images
def size_list(self, lookup='all'):
    '''
    Return a mapping of all size data for available providers
    '''
    data = {}

    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data

    for alias, driver in lookups:
        fun = '{0}.avail_sizes'.format(driver)
        if fun not in self.clouds:
            # The capability to gather sizes is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the sizes information',
                driver, alias
            )
            continue

        if alias not in data:
            data[alias] = {}

        try:
            # Run the driver query with the active provider name injected
            # into the cloud module's globals.
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def provider_list(self, lookup='all'):
    '''
    Return a mapping of the configured provider aliases and drivers.

    Only the ``{alias: {driver: {}}}`` skeleton is returned; no provider
    details are queried.
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data

    for alias, driver in lookups:
        if alias not in data:
            data[alias] = {}

        if driver not in data[alias]:
            data[alias][driver] = {}
    return data
def profile_list(self, provider, lookup='all'):
    '''
    Return a mapping of all configured profiles
    '''
    profiles = {}
    matched = self.lookup_profiles(provider, lookup)
    if not matched:
        return profiles

    # Only the alias/driver skeleton is returned; no details are queried.
    for prof_alias, prof_driver in matched:
        profiles.setdefault(prof_alias, {}).setdefault(prof_driver, {})
    return profiles
def create_all(self):
    '''
    Create/Verify the VMs in the VM data
    '''
    # Spin up (or verify) every profile-defined VM and collect the results.
    return [
        {profile_name: self.create(profile_conf)}
        for profile_name, profile_conf in six.iteritems(self.opts['profiles'])
    ]
def destroy(self, names, cached=False):
    '''
    Destroy the named VMs

    :param names: iterable of VM names to destroy
    :param cached: if True, a cached provider query may be reused instead
        of re-polling the providers
    :return: mapping of ``alias -> driver -> vm_name -> driver result``;
        may also contain a ``'Not Found'`` key with unmatched names
    :raises SaltCloudSystemExit: when no requested VM was found or
        nothing was destroyed
    '''
    processed = {}
    names = set(names)
    matching = self.get_running_by_names(names, cached=cached)
    vms_to_destroy = set()
    parallel_data = []
    for alias, drivers in six.iteritems(matching):
        for driver, vms in six.iteritems(drivers):
            for name in vms:
                if name in names:
                    vms_to_destroy.add((alias, driver, name))
                    if self.opts['parallel']:
                        parallel_data.append({
                            'opts': self.opts,
                            'name': name,
                            'alias': alias,
                            'driver': driver,
                        })

    # destroying in parallel
    if self.opts['parallel'] and parallel_data:
        # set the pool size based on configuration or default to
        # the number of machines we're destroying
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Destroying in parallel mode; '
                 'Cloud pool size: %s', pool_size)

        # kick off the parallel destroy
        output_multip = enter_mainloop(
            _destroy_multiprocessing, parallel_data, pool_size=pool_size)

        # massage the multiprocessing output a bit
        ret_multip = {}
        for obj in output_multip:
            ret_multip.update(obj)

        # build up a data structure similar to what the non-parallel
        # destroy uses
        for obj in parallel_data:
            alias = obj['alias']
            driver = obj['driver']
            name = obj['name']
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret_multip[name]
            if name in names:
                names.remove(name)

    # not destroying in parallel
    else:
        log.info('Destroying in non-parallel mode.')
        for alias, driver, name in vms_to_destroy:
            fun = '{0}.destroy'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                ret = self.clouds[fun](name)
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret
            if name in names:
                names.remove(name)

    # now the processed data structure contains the output from either
    # the parallel or non-parallel destroy and we should finish up
    # with removing minion keys if necessary
    for alias, driver, name in vms_to_destroy:
        ret = processed[alias][driver][name]
        if not ret:
            continue

        vm_ = {
            'name': name,
            'profile': None,
            'provider': ':'.join([alias, driver]),
            'driver': driver
        }
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        key_file = os.path.join(
            self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
        )
        globbed_key_file = glob.glob('{0}.*'.format(key_file))

        if not os.path.isfile(key_file) and not globbed_key_file:
            # There's no such key file!? It might have been renamed
            if isinstance(ret, dict) and 'newname' in ret:
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], ret['newname']
                )
            continue

        if os.path.isfile(key_file) and not globbed_key_file:
            # Single key entry. Remove it!
            salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
            continue

        # Since we have globbed matches, there are probably some keys for which their minion
        # configuration has append_domain set.
        if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
            # Single entry, let's remove it!
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'],
                os.path.basename(globbed_key_file[0])
            )
            continue

        # Since we can't get the profile or map entry used to create
        # the VM, we can't also get the append_domain setting.
        # And if we reached this point, we have several minion keys
        # who's name starts with the machine name we're deleting.
        # We need to ask one by one!?
        print(
            'There are several minion keys who\'s name starts '
            'with \'{0}\'. We need to ask you which one should be '
            'deleted:'.format(
                name
            )
        )
        while True:
            for idx, filename in enumerate(globbed_key_file):
                print(' {0}: {1}'.format(
                    idx, os.path.basename(filename)
                ))
            selection = input(
                'Which minion key should be deleted(number)? '
            )
            try:
                selection = int(selection)
            except ValueError:
                print(
                    '\'{0}\' is not a valid selection.'.format(selection)
                )

            try:
                # NOTE(review): when int() failed above, ``selection`` is
                # still the raw string here; list.pop then raises TypeError,
                # which this broad except turns into a re-prompt.
                filename = os.path.basename(
                    globbed_key_file.pop(selection)
                )
            except Exception:
                continue

            delete = input(
                'Delete \'{0}\'? [Y/n]? '.format(filename)
            )
            if delete == '' or delete.lower().startswith('y'):
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], filename
                )
                print('Deleted \'{0}\''.format(filename))
                break

            print('Did not delete \'{0}\''.format(filename))
            break

    if names and not processed:
        # These machines were asked to be destroyed but could not be found
        raise SaltCloudSystemExit(
            'The following VM\'s were not found: {0}'.format(
                ', '.join(names)
            )
        )

    elif names and processed:
        processed['Not Found'] = names

    elif not processed:
        raise SaltCloudSystemExit('No machines were destroyed!')

    return processed
def reboot(self, names):
    '''
    Reboot the named VMs
    '''
    ret = []
    pmap = self.map_providers_parallel()
    acts = {}
    # NOTE(review): map_providers_parallel() returns
    # {alias: {driver: {vm_name: details}}}, so ``nodes`` below is keyed
    # by driver name, not VM name; the ``node in names`` test and the
    # '{0}.reboot' lookup (which formats the *alias*) only line up when
    # the alias equals the driver name -- confirm against callers.
    for prov, nodes in six.iteritems(pmap):
        acts[prov] = []
        for node in nodes:
            if node in names:
                acts[prov].append(node)
    for prov, names_ in six.iteritems(acts):
        fun = '{0}.reboot'.format(prov)
        for name in names_:
            ret.append({
                name: self.clouds[fun](name)
            })
    return ret
def create(self, vm_, local_master=True):
    '''
    Create a single VM

    :param vm_: fully merged VM configuration dict; must carry 'name'
        and a 'provider' value of the form 'alias:driver'
    :param local_master: when True (and deploy is enabled), pre-accept
        the generated minion key on the local master
    :return: the driver's creation output dict, or None when the driver
        is unavailable
    '''
    output = {}

    minion_dict = salt.config.get_cloud_config_value(
        'minion', vm_, self.opts, default={}
    )

    alias, driver = vm_['provider'].split(':')
    fun = '{0}.create'.format(driver)
    if fun not in self.clouds:
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            vm_['name'], vm_['provider'], driver
        )
        return

    deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
    make_master = salt.config.get_cloud_config_value(
        'make_master',
        vm_,
        self.opts
    )

    if deploy:
        if not make_master and 'master' not in minion_dict:
            log.warning(
                'There\'s no master defined on the \'%s\' VM settings.',
                vm_['name']
            )

        # Generate a minion keypair only when neither half was supplied.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'%s\'', vm_['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['pub_key'] = pub
            vm_['priv_key'] = priv
    else:
        # Note(pabelanger): We still reference pub_key and priv_key when
        # deploy is disabled.
        vm_['pub_key'] = None
        vm_['priv_key'] = None

    key_id = minion_dict.get('id', vm_['name'])

    domain = vm_.get('domain')
    if vm_.get('use_fqdn') and domain:
        minion_dict['append_domain'] = domain

    if 'append_domain' in minion_dict:
        key_id = '.'.join([key_id, minion_dict['append_domain']])

    if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
        log.debug('Generating the master keys for \'%s\'', vm_['name'])
        master_priv, master_pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize',
                vm_,
                self.opts
            )
        )
        vm_['master_pub'] = master_pub
        vm_['master_pem'] = master_priv

    if local_master is True and deploy is True:
        # Accept the key on the local master
        salt.utils.cloud.accept_key(
            self.opts['pki_dir'], vm_['pub_key'], key_id
        )

    vm_['os'] = salt.config.get_cloud_config_value(
        'script',
        vm_,
        self.opts
    )

    try:
        vm_['inline_script'] = salt.config.get_cloud_config_value(
            'inline_script',
            vm_,
            self.opts
        )
    except KeyError:
        pass

    try:
        alias, driver = vm_['provider'].split(':')
        func = '{0}.create'.format(driver)
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            output = self.clouds[func](vm_)

        if output is not False and 'sync_after_install' in self.opts:
            if self.opts['sync_after_install'] not in (
                    'all', 'modules', 'states', 'grains'):
                log.error('Bad option for sync_after_install')
                return output

            # A small pause helps the sync work more reliably
            time.sleep(3)

            start = int(time.time())
            # Retry the sync for up to a minute after creation.
            while int(time.time()) < start + 60:
                # We'll try every <timeout> seconds, up to a minute
                mopts_ = salt.config.DEFAULT_MASTER_OPTS
                conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                mopts_.update(
                    salt.config.master_config(
                        os.path.join(conf_path,
                                     'master')
                    )
                )

                client = salt.client.get_local_client(mopts=mopts_)

                ret = client.cmd(
                    vm_['name'],
                    'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                    timeout=self.opts['timeout']
                )
                if ret:
                    log.info(
                        six.u('Synchronized the following dynamic modules: '
                              ' {0}').format(ret)
                    )
                    break
    except KeyError as exc:
        log.exception(
            'Failed to create VM %s. Configuration value %s needs '
            'to be set', vm_['name'], exc
        )
    # If it's a map then we need to respect the 'requires'
    # so we do it later
    try:
        opt_map = self.opts['map']
    except KeyError:
        opt_map = False
    if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
        log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
        client = salt.client.get_local_client(mopts=self.opts)
        action_out = client.cmd(
            vm_['name'],
            self.opts['start_action'],
            timeout=self.opts['timeout'] * 60
        )
        output['ret'] = action_out
    return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
    '''
    Create vm config.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    '''
    # Layer the configuration sources from least to most specific:
    # main cloud config < provider < profile < explicit overrides.
    merged = main.copy()
    for layer in (provider, profile):
        merged = salt.utils.dictupdate.update(merged, layer)
    merged.update(overrides)
    merged['name'] = name
    return merged
def extras(self, extra_):
    '''
    Extra actions
    '''
    result = {}
    alias, driver = extra_['provider'].split(':')
    fun = '{0}.{1}'.format(driver, extra_['action'])
    if fun not in self.clouds:
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            extra_['name'], extra_['provider'], driver
        )
        return

    try:
        # Run the driver action with the provider name exposed to it.
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=extra_['provider']
        ):
            result = self.clouds[fun](**extra_)
    except KeyError as exc:
        log.exception(
            'Failed to perform %s.%s on %s. '
            'Configuration value %s needs to be set',
            extra_['provider'], extra_['action'], extra_['name'], exc
        )
    return result
def run_profile(self, profile, names, vm_overrides=None):
    '''
    Parse over the options passed on the command line and determine how to
    handle them

    :param profile: profile name; must exist in ``self.opts['profiles']``
    :param names: iterable of VM names to create from the profile
    :param vm_overrides: optional per-VM configuration overrides
    :return: mapping of VM name to its creation result, or an
        ``{'Error': ...}`` entry per failed/duplicate name
    '''
    if profile not in self.opts['profiles']:
        msg = 'Profile {0} is not defined'.format(profile)
        log.error(msg)
        return {'Error': msg}

    ret = {}
    if not vm_overrides:
        vm_overrides = {}

    # Best-effort read of the main cloud configuration file; fall back
    # to an empty config on any read/parse problem.
    try:
        with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
            main_cloud_config = salt.utils.yaml.safe_load(mcc)
        if not main_cloud_config:
            main_cloud_config = {}
    except KeyError:
        main_cloud_config = {}
    except IOError:
        main_cloud_config = {}

    if main_cloud_config is None:
        main_cloud_config = {}

    mapped_providers = self.map_providers_parallel()
    profile_details = self.opts['profiles'][profile]
    vms = {}
    # Flatten the provider map into a vm_name -> details index so
    # duplicates can be detected below.
    for prov, val in six.iteritems(mapped_providers):
        prov_name = next(iter(val))
        for node in mapped_providers[prov][prov_name]:
            vms[node] = mapped_providers[prov][prov_name][node]
            vms[node]['provider'] = prov
            vms[node]['driver'] = prov_name
    alias, driver = profile_details['provider'].split(':')
    provider_details = self.opts['providers'][alias][driver].copy()
    del provider_details['profiles']

    for name in names:
        if name in vms:
            prov = vms[name]['provider']
            driv = vms[name]['driver']
            msg = '{0} already exists under {1}:{2}'.format(
                name, prov, driv
            )
            log.error(msg)
            ret[name] = {'Error': msg}
            continue

        vm_ = self.vm_config(
            name,
            main_cloud_config,
            provider_details,
            profile_details,
            vm_overrides,
        )
        if self.opts['parallel']:
            process = multiprocessing.Process(
                target=self.create,
                args=(vm_,)
            )
            process.start()
            ret[name] = {
                'Provisioning': 'VM being provisioned in parallel. '
                                'PID: {0}'.format(process.pid)
            }
            continue

        try:
            # No need to inject __active_provider_name__ into the context
            # here because self.create takes care of that
            ret[name] = self.create(vm_)
            if not ret[name]:
                ret[name] = {'Error': 'Failed to deploy VM'}
                if len(names) == 1:
                    raise SaltCloudSystemExit('Failed to deploy VM')
                continue
            if self.opts.get('show_deploy_args', False) is False:
                ret[name].pop('deploy_kwargs', None)
        except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
            # Single-name runs surface the failure; batch runs record it.
            if len(names) == 1:
                raise
            ret[name] = {'Error': str(exc)}

    return ret
def do_action(self, names, kwargs):
    '''
    Perform an action on a VM which may be specific to this cloud provider

    :param names: iterable of VM names to act on
    :param kwargs: keyword arguments forwarded to the driver action
        function (``__pub_*`` entries are stripped first)
    :return: mapping of ``alias -> driver -> vm_name -> result``, plus
        bookkeeping keys ('Invalid Actions', 'Not Found',
        'Not Actioned/Not Running') when applicable
    '''
    ret = {}
    invalid_functions = {}
    names = set(names)

    for alias, drivers in six.iteritems(self.map_providers_parallel()):
        if not names:
            break
        for driver, vms in six.iteritems(drivers):
            if not names:
                break
            valid_function = True
            fun = '{0}.{1}'.format(driver, self.opts['action'])
            if fun not in self.clouds:
                log.info('\'%s()\' is not available. Not actioning...', fun)
                valid_function = False
            for vm_name, vm_details in six.iteritems(vms):
                if not names:
                    break
                if vm_name not in names:
                    # The VM may be listed under its driver-assigned id
                    # rather than its display name.
                    if not isinstance(vm_details, dict):
                        vm_details = {}
                    if 'id' in vm_details and vm_details['id'] in names:
                        vm_name = vm_details['id']
                    else:
                        log.debug(
                            'vm:%s in provider:%s is not in name '
                            'list:\'%s\'', vm_name, driver, names
                        )
                        continue

                # Build the dictionary of invalid functions with their associated VMs.
                if valid_function is False:
                    if invalid_functions.get(fun) is None:
                        invalid_functions.update({fun: []})
                    invalid_functions[fun].append(vm_name)
                    continue

                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if alias not in ret:
                        ret[alias] = {}
                    if driver not in ret[alias]:
                        ret[alias][driver] = {}

                    # Clean kwargs of "__pub_*" data before running the cloud action call.
                    # Prevents calling positional "kwarg" arg before "call" when no kwarg
                    # argument is present in the cloud driver function's arg spec.
                    kwargs = salt.utils.args.clean_kwargs(**kwargs)

                    if kwargs:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, kwargs, call='action'
                        )
                    else:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, call='action'
                        )
                    names.remove(vm_name)

    # Set the return information for the VMs listed in the invalid_functions dict.
    missing_vms = set()
    if invalid_functions:
        ret['Invalid Actions'] = invalid_functions
        invalid_func_vms = set()
        for key, val in six.iteritems(invalid_functions):
            invalid_func_vms = invalid_func_vms.union(set(val))

        # Find the VMs that are in names, but not in set of invalid functions.
        missing_vms = names.difference(invalid_func_vms)
        if missing_vms:
            ret['Not Found'] = list(missing_vms)
            ret['Not Actioned/Not Running'] = list(names)

    if not names:
        return ret

    # Don't return missing VM information for invalid functions until after we've had a
    # Chance to return successful actions. If a function is valid for one driver, but
    # Not another, we want to make sure the successful action is returned properly.
    if missing_vms:
        return ret

    # If we reach this point, the Not Actioned and Not Found lists will be the same,
    # But we want to list both for clarity/consistency with the invalid functions lists.
    ret['Not Actioned/Not Running'] = list(names)
    ret['Not Found'] = list(names)
    return ret
def __filter_non_working_providers(self):
    '''
    Remove any mis-configured cloud providers from the available listing
    '''
    # Iterate over copies because both the alias dict and each driver
    # dict are mutated (popped) while looping.
    for alias, drivers in six.iteritems(self.opts['providers'].copy()):
        for driver in drivers.copy():
            fun = '{0}.get_configured_provider'.format(driver)
            if fun not in self.clouds:
                # Mis-configured provider that got removed?
                log.warning(
                    'The cloud driver, \'%s\', configured under the '
                    '\'%s\' cloud provider alias, could not be loaded. '
                    'Please check your provider configuration files and '
                    'ensure all required dependencies are installed '
                    'for the \'%s\' driver.\n'
                    'In rare cases, this could indicate the \'%s()\' '
                    'function could not be found.\nRemoving \'%s\' from '
                    'the available providers list',
                    driver, alias, driver, fun, driver
                )
                self.opts['providers'][alias].pop(driver)

                if alias not in self.opts['providers']:
                    continue

                # Drop the alias entirely once its last driver is gone.
                if not self.opts['providers'][alias]:
                    self.opts['providers'].pop(alias)
                continue

            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                if self.clouds[fun]() is False:
                    log.warning(
                        'The cloud driver, \'%s\', configured under the '
                        '\'%s\' cloud provider alias is not properly '
                        'configured. Removing it from the available '
                        'providers list.', driver, alias
                    )
                    self.opts['providers'][alias].pop(driver)

            if alias not in self.opts['providers']:
                continue

            if not self.opts['providers'][alias]:
                self.opts['providers'].pop(alias)
|
saltstack/salt
|
salt/cloud/__init__.py
|
Cloud.__filter_non_working_providers
|
python
|
def __filter_non_working_providers(self):
'''
Remove any mis-configured cloud providers from the available listing
'''
for alias, drivers in six.iteritems(self.opts['providers'].copy()):
for driver in drivers.copy():
fun = '{0}.get_configured_provider'.format(driver)
if fun not in self.clouds:
# Mis-configured provider that got removed?
log.warning(
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias, could not be loaded. '
'Please check your provider configuration files and '
'ensure all required dependencies are installed '
'for the \'%s\' driver.\n'
'In rare cases, this could indicate the \'%s()\' '
'function could not be found.\nRemoving \'%s\' from '
'the available providers list',
driver, alias, driver, fun, driver
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
continue
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
if self.clouds[fun]() is False:
log.warning(
'The cloud driver, \'%s\', configured under the '
'\'%s\' cloud provider alias is not properly '
'configured. Removing it from the available '
'providers list.', driver, alias
)
self.opts['providers'][alias].pop(driver)
if alias not in self.opts['providers']:
continue
if not self.opts['providers'][alias]:
self.opts['providers'].pop(alias)
|
Remove any mis-configured cloud providers from the available listing
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1597-L1643
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
class Cloud(object):
'''
An object for the creation of new VMs
'''
def __init__(self, opts):
self.opts = opts
self.client = CloudClient(opts=self.opts)
self.clouds = salt.loader.clouds(self.opts)
self.__filter_non_working_providers()
self.__cached_provider_queries = {}
def get_configured_providers(self):
'''
Return the configured providers
'''
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
if len(drivers) > 1:
for driver in drivers:
providers.add('{0}:{1}'.format(alias, driver))
continue
providers.add(alias)
return providers
def lookup_providers(self, lookup):
'''
Get a dict describing the configured providers
'''
if lookup is None:
lookup = 'all'
if lookup == 'all':
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'There are no cloud providers configured.'
)
return providers
if ':' in lookup:
alias, driver = lookup.split(':')
if alias not in self.opts['providers'] or \
driver not in self.opts['providers'][alias]:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. Available: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
providers = set()
for alias, drivers in six.iteritems(self.opts['providers']):
for driver in drivers:
if lookup in (alias, driver):
providers.add((alias, driver))
if not providers:
raise SaltCloudSystemExit(
'No cloud providers matched \'{0}\'. '
'Available selections: {1}'.format(
lookup, ', '.join(self.get_configured_providers())
)
)
return providers
def lookup_profiles(self, provider, lookup):
'''
Return a dictionary describing the configured profiles
'''
if provider is None:
provider = 'all'
if lookup is None:
lookup = 'all'
if lookup == 'all':
profiles = set()
provider_profiles = set()
for alias, info in six.iteritems(self.opts['profiles']):
providers = info.get('provider')
if providers:
given_prov_name = providers.split(':')[0]
salt_prov_name = providers.split(':')[1]
if given_prov_name == provider:
provider_profiles.add((alias, given_prov_name))
elif salt_prov_name == provider:
provider_profiles.add((alias, salt_prov_name))
profiles.add((alias, given_prov_name))
if not profiles:
raise SaltCloudSystemExit(
'There are no cloud profiles configured.'
)
if provider != 'all':
return provider_profiles
return profiles
def map_providers(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
pmap = {}
for alias, drivers in six.iteritems(self.opts['providers']):
for driver, details in six.iteritems(drivers):
fun = '{0}.{1}'.format(driver, query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
if alias not in pmap:
pmap[alias] = {}
try:
with salt.utils.context.func_globals_inject(
self.clouds[fun],
__active_provider_name__=':'.join([alias, driver])
):
pmap[alias][driver] = self.clouds[fun]()
except Exception as err:
log.debug(
'Failed to execute \'%s()\' while querying for '
'running nodes: %s', fun, err,
exc_info_on_loglevel=logging.DEBUG
)
# Failed to communicate with the provider, don't list any
# nodes
pmap[alias][driver] = []
self.__cached_provider_queries[query] = pmap
return pmap
def map_providers_parallel(self, query='list_nodes', cached=False):
'''
Return a mapping of what named VMs are running on what VM providers
based on what providers are defined in the configuration and VMs
Same as map_providers but query in parallel.
'''
if cached is True and query in self.__cached_provider_queries:
return self.__cached_provider_queries[query]
opts = self.opts.copy()
multiprocessing_data = []
# Optimize Providers
opts['providers'] = self._optimize_providers(opts['providers'])
for alias, drivers in six.iteritems(opts['providers']):
# Make temp query for this driver to avoid overwrite next
this_query = query
for driver, details in six.iteritems(drivers):
# If driver has function list_nodes_min, just replace it
# with query param to check existing vms on this driver
# for minimum information, Otherwise still use query param.
if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:
this_query = 'list_nodes_min'
fun = '{0}.{1}'.format(driver, this_query)
if fun not in self.clouds:
log.error('Public cloud provider %s is not available', driver)
continue
multiprocessing_data.append({
'fun': fun,
'opts': opts,
'query': this_query,
'alias': alias,
'driver': driver
})
output = {}
if not multiprocessing_data:
return output
data_count = len(multiprocessing_data)
pool = multiprocessing.Pool(data_count < 10 and data_count or 10,
init_pool_worker)
parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,
multiprocessing_data,
pool=pool)
for alias, driver, details in parallel_pmap:
if not details:
# There's no providers details?! Skip it!
continue
if alias not in output:
output[alias] = {}
output[alias][driver] = details
self.__cached_provider_queries[query] = output
return output
def get_running_by_names(self, names, query='list_nodes', cached=False,
profile=None):
if isinstance(names, six.string_types):
names = [names]
matches = {}
handled_drivers = {}
mapped_providers = self.map_providers_parallel(query, cached=cached)
for alias, drivers in six.iteritems(mapped_providers):
for driver, vms in six.iteritems(drivers):
if driver not in handled_drivers:
handled_drivers[driver] = alias
# When a profile is specified, only return an instance
# that matches the provider specified in the profile.
# This solves the issues when many providers return the
# same instance. For example there may be one provider for
# each availability zone in amazon in the same region, but
# the search returns the same instance for each provider
# because amazon returns all instances in a region, not
# availability zone.
if profile and alias not in self.opts['profiles'][profile]['provider'].split(':')[0]:
continue
for vm_name, details in six.iteritems(vms):
# XXX: The logic below can be removed once the aws driver
# is removed
if vm_name not in names:
continue
elif driver == 'ec2' and 'aws' in handled_drivers and \
'aws' in matches[handled_drivers['aws']] and \
vm_name in matches[handled_drivers['aws']]['aws']:
continue
elif driver == 'aws' and 'ec2' in handled_drivers and \
'ec2' in matches[handled_drivers['ec2']] and \
vm_name in matches[handled_drivers['ec2']]['ec2']:
continue
if alias not in matches:
matches[alias] = {}
if driver not in matches[alias]:
matches[alias][driver] = {}
matches[alias][driver][vm_name] = details
return matches
def _optimize_providers(self, providers):
    '''
    Return an optimized mapping of available providers
    '''
    optimized = {}
    by_driver = {}
    # Regroup the provider data by driver name so each driver's
    # optimizer function can see all of its aliases at once.
    for alias, drivers in six.iteritems(providers):
        for driver_name, details in six.iteritems(drivers):
            by_driver.setdefault(driver_name, {})[alias] = details
    for driver_name, driver_providers in six.iteritems(by_driver):
        optimizer = '{0}.optimize_providers'.format(driver_name)
        if optimizer not in self.clouds:
            # Driver has no optimizer; pass its data through unchanged.
            log.debug('The \'%s\' cloud driver is unable to be optimized.', driver_name)
            for alias, details in six.iteritems(driver_providers):
                optimized.setdefault(alias, {})[driver_name] = details
            continue
        optimized_data = self.clouds[optimizer](driver_providers)
        if optimized_data:
            for alias, details in six.iteritems(optimized_data):
                optimized.setdefault(alias, {})[driver_name] = details
    return optimized
def location_list(self, lookup='all'):
    '''
    Return a mapping of all location data for available providers
    '''
    results = {}
    providers = self.lookup_providers(lookup)
    if not providers:
        return results
    for alias, driver in providers:
        list_fun = '{0}.avail_locations'.format(driver)
        if list_fun not in self.clouds:
            # This driver does not implement location listing.
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the locations information',
                driver, alias
            )
            continue
        results.setdefault(alias, {})
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[list_fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                results[alias][driver] = self.clouds[list_fun]()
        except Exception as err:
            # A failing provider must not abort the whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                list_fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return results
def image_list(self, lookup='all'):
    '''
    Return a mapping of all image data for available providers
    '''
    results = {}
    providers = self.lookup_providers(lookup)
    if not providers:
        return results
    for alias, driver in providers:
        list_fun = '{0}.avail_images'.format(driver)
        if list_fun not in self.clouds:
            # This driver does not implement image listing.
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the images information',
                driver, alias
            )
            continue
        results.setdefault(alias, {})
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[list_fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                results[alias][driver] = self.clouds[list_fun]()
        except Exception as err:
            # A failing provider must not abort the whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                list_fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return results
def size_list(self, lookup='all'):
    '''
    Return a mapping of all size data for available providers

    :param lookup: provider selector (``'all'``, an alias, a driver, or
        ``'alias:driver'``), resolved through ``lookup_providers``
    :return: ``{alias: {driver: sizes}}``; providers whose driver does
        not implement ``avail_sizes`` are skipped
    '''
    data = {}
    lookups = self.lookup_providers(lookup)
    if not lookups:
        return data
    for alias, driver in lookups:
        fun = '{0}.avail_sizes'.format(driver)
        if fun not in self.clouds:
            # The capability to gather sizes is not supported by this
            # cloud module
            log.debug(
                'The \'%s\' cloud driver defined under \'%s\' provider '
                'alias is unable to get the sizes information',
                driver, alias
            )
            continue
        if alias not in data:
            data[alias] = {}
        try:
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                data[alias][driver] = self.clouds[fun]()
        except Exception as err:
            # One provider failing must not abort the whole listing.
            log.error(
                'Failed to get the output of \'%s()\': %s',
                fun, err, exc_info_on_loglevel=logging.DEBUG
            )
    return data
def provider_list(self, lookup='all'):
    '''
    Return the matching configured providers as an (empty-valued)
    ``{alias: {driver: {}}}`` mapping.
    '''
    providers = {}
    matches = self.lookup_providers(lookup)
    if not matches:
        return providers
    for alias, driver in matches:
        providers.setdefault(alias, {}).setdefault(driver, {})
    return providers
def profile_list(self, provider, lookup='all'):
    '''
    Return a mapping of all configured profiles
    '''
    profiles = {}
    matches = self.lookup_profiles(provider, lookup)
    if not matches:
        return profiles
    for alias, driver in matches:
        profiles.setdefault(alias, {}).setdefault(driver, {})
    return profiles
def create_all(self):
    '''
    Create/Verify the VMs in the VM data
    '''
    # One result entry per configured profile/VM definition.
    return [
        {vm_name: self.create(vm_details)}
        for vm_name, vm_details in six.iteritems(self.opts['profiles'])
    ]
def destroy(self, names, cached=False):
    '''
    Destroy the named VMs

    :param names: iterable of VM names to destroy
    :param cached: if True, allow a cached provider query when resolving
        which providers own the named VMs
    :return: ``{alias: {driver: {name: result}}}``; a ``'Not Found'``
        entry is added for names that could not be matched
    :raises SaltCloudSystemExit: when none of the names were found or
        nothing was destroyed
    '''
    processed = {}
    names = set(names)
    matching = self.get_running_by_names(names, cached=cached)
    vms_to_destroy = set()
    parallel_data = []
    for alias, drivers in six.iteritems(matching):
        for driver, vms in six.iteritems(drivers):
            for name in vms:
                if name in names:
                    vms_to_destroy.add((alias, driver, name))
                    if self.opts['parallel']:
                        parallel_data.append({
                            'opts': self.opts,
                            'name': name,
                            'alias': alias,
                            'driver': driver,
                        })
    # destroying in parallel
    if self.opts['parallel'] and parallel_data:
        # set the pool size based on configuration or default to
        # the number of machines we're destroying
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Destroying in parallel mode; '
                 'Cloud pool size: %s', pool_size)
        # kick off the parallel destroy
        output_multip = enter_mainloop(
            _destroy_multiprocessing, parallel_data, pool_size=pool_size)
        # massage the multiprocessing output a bit
        ret_multip = {}
        for obj in output_multip:
            ret_multip.update(obj)
        # build up a data structure similar to what the non-parallel
        # destroy uses
        for obj in parallel_data:
            alias = obj['alias']
            driver = obj['driver']
            name = obj['name']
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret_multip[name]
            if name in names:
                names.remove(name)
    # not destroying in parallel
    else:
        log.info('Destroying in non-parallel mode.')
        for alias, driver, name in vms_to_destroy:
            fun = '{0}.destroy'.format(driver)
            with salt.utils.context.func_globals_inject(
                self.clouds[fun],
                __active_provider_name__=':'.join([alias, driver])
            ):
                ret = self.clouds[fun](name)
            if alias not in processed:
                processed[alias] = {}
            if driver not in processed[alias]:
                processed[alias][driver] = {}
            processed[alias][driver][name] = ret
            if name in names:
                names.remove(name)
    # now the processed data structure contains the output from either
    # the parallel or non-parallel destroy and we should finish up
    # with removing minion keys if necessary
    for alias, driver, name in vms_to_destroy:
        ret = processed[alias][driver][name]
        if not ret:
            # Falsy result means the driver did not destroy this VM, so
            # its minion key must be left alone.
            continue
        vm_ = {
            'name': name,
            'profile': None,
            'provider': ':'.join([alias, driver]),
            'driver': driver
        }
        minion_dict = salt.config.get_cloud_config_value(
            'minion', vm_, self.opts, default={}
        )
        key_file = os.path.join(
            self.opts['pki_dir'], 'minions', minion_dict.get('id', name)
        )
        globbed_key_file = glob.glob('{0}.*'.format(key_file))
        if not os.path.isfile(key_file) and not globbed_key_file:
            # There's no such key file!? It might have been renamed
            if isinstance(ret, dict) and 'newname' in ret:
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], ret['newname']
                )
            continue
        if os.path.isfile(key_file) and not globbed_key_file:
            # Single key entry. Remove it!
            salt.utils.cloud.remove_key(self.opts['pki_dir'], os.path.basename(key_file))
            continue
        # Since we have globbed matches, there are probably some keys for which their minion
        # configuration has append_domain set.
        if not os.path.isfile(key_file) and globbed_key_file and len(globbed_key_file) == 1:
            # Single entry, let's remove it!
            salt.utils.cloud.remove_key(
                self.opts['pki_dir'],
                os.path.basename(globbed_key_file[0])
            )
            continue
        # Since we can't get the profile or map entry used to create
        # the VM, we can't also get the append_domain setting.
        # And if we reached this point, we have several minion keys
        # who's name starts with the machine name we're deleting.
        # We need to ask one by one!?
        print(
            'There are several minion keys who\'s name starts '
            'with \'{0}\'. We need to ask you which one should be '
            'deleted:'.format(
                name
            )
        )
        while True:
            for idx, filename in enumerate(globbed_key_file):
                print(' {0}: {1}'.format(
                    idx, os.path.basename(filename)
                ))
            selection = input(
                'Which minion key should be deleted(number)? '
            )
            try:
                selection = int(selection)
            except ValueError:
                # NOTE(review): on non-numeric input ``selection`` stays
                # a string, so the pop() below raises and is swallowed
                # by the ``except Exception`` — the prompt repeats.
                print(
                    '\'{0}\' is not a valid selection.'.format(selection)
                )
            try:
                filename = os.path.basename(
                    globbed_key_file.pop(selection)
                )
            except Exception:
                continue
            delete = input(
                'Delete \'{0}\'? [Y/n]? '.format(filename)
            )
            if delete == '' or delete.lower().startswith('y'):
                salt.utils.cloud.remove_key(
                    self.opts['pki_dir'], filename
                )
                print('Deleted \'{0}\''.format(filename))
                break
            print('Did not delete \'{0}\''.format(filename))
            break
    if names and not processed:
        # These machines were asked to be destroyed but could not be found
        raise SaltCloudSystemExit(
            'The following VM\'s were not found: {0}'.format(
                ', '.join(names)
            )
        )
    elif names and processed:
        processed['Not Found'] = names
    elif not processed:
        raise SaltCloudSystemExit('No machines were destroyed!')
    return processed
def reboot(self, names):
    '''
    Reboot the named VMs

    :param names: iterable of VM names to reboot
    :return: list of ``{name: reboot_result}`` dicts
    '''
    ret = []
    pmap = self.map_providers_parallel()
    acts = {}
    # NOTE(review): map_providers_parallel() builds
    # ``{alias: {driver: {node: details}}}`` (see the head of this
    # file), so iterating ``nodes`` here walks driver names rather than
    # node names — confirm the intended structure before relying on the
    # matching below.
    for prov, nodes in six.iteritems(pmap):
        acts[prov] = []
        for node in nodes:
            if node in names:
                acts[prov].append(node)
    for prov, names_ in six.iteritems(acts):
        fun = '{0}.reboot'.format(prov)
        for name in names_:
            ret.append({
                name: self.clouds[fun](name)
            })
    return ret
def create(self, vm_, local_master=True):
    '''
    Create a single VM

    :param vm_: fully merged VM configuration dict; must contain at
        least ``name`` and ``provider`` (``alias:driver``)
    :param local_master: when True and deploy is enabled, accept the
        generated minion key on the local master
    :return: the driver's creation output dict, or ``None`` when the
        driver's ``create`` function is unavailable
    '''
    output = {}
    minion_dict = salt.config.get_cloud_config_value(
        'minion', vm_, self.opts, default={}
    )
    alias, driver = vm_['provider'].split(':')
    fun = '{0}.create'.format(driver)
    if fun not in self.clouds:
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            vm_['name'], vm_['provider'], driver
        )
        return
    deploy = salt.config.get_cloud_config_value('deploy', vm_, self.opts)
    make_master = salt.config.get_cloud_config_value(
        'make_master',
        vm_,
        self.opts
    )
    if deploy:
        if not make_master and 'master' not in minion_dict:
            log.warning(
                'There\'s no master defined on the \'%s\' VM settings.',
                vm_['name']
            )
        # Only generate a minion key pair if the caller supplied neither.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'%s\'', vm_['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    vm_,
                    self.opts
                )
            )
            vm_['pub_key'] = pub
            vm_['priv_key'] = priv
    else:
        # Note(pabelanger): We still reference pub_key and priv_key when
        # deploy is disabled.
        vm_['pub_key'] = None
        vm_['priv_key'] = None
    key_id = minion_dict.get('id', vm_['name'])
    domain = vm_.get('domain')
    if vm_.get('use_fqdn') and domain:
        minion_dict['append_domain'] = domain
    if 'append_domain' in minion_dict:
        key_id = '.'.join([key_id, minion_dict['append_domain']])
    if make_master is True and 'master_pub' not in vm_ and 'master_pem' not in vm_:
        log.debug('Generating the master keys for \'%s\'', vm_['name'])
        master_priv, master_pub = salt.utils.cloud.gen_keys(
            salt.config.get_cloud_config_value(
                'keysize',
                vm_,
                self.opts
            )
        )
        vm_['master_pub'] = master_pub
        vm_['master_pem'] = master_priv
    if local_master is True and deploy is True:
        # Accept the key on the local master
        salt.utils.cloud.accept_key(
            self.opts['pki_dir'], vm_['pub_key'], key_id
        )
    vm_['os'] = salt.config.get_cloud_config_value(
        'script',
        vm_,
        self.opts
    )
    try:
        vm_['inline_script'] = salt.config.get_cloud_config_value(
            'inline_script',
            vm_,
            self.opts
        )
    except KeyError:
        pass
    try:
        # ``func`` is recomputed from the same provider string, so it is
        # identical to ``fun`` above.
        alias, driver = vm_['provider'].split(':')
        func = '{0}.create'.format(driver)
        with salt.utils.context.func_globals_inject(
            self.clouds[fun],
            __active_provider_name__=':'.join([alias, driver])
        ):
            output = self.clouds[func](vm_)
        if output is not False and 'sync_after_install' in self.opts:
            if self.opts['sync_after_install'] not in (
                    'all', 'modules', 'states', 'grains'):
                log.error('Bad option for sync_after_install')
                return output
            # A small pause helps the sync work more reliably
            time.sleep(3)
            start = int(time.time())
            while int(time.time()) < start + 60:
                # We'll try every <timeout> seconds, up to a minute
                mopts_ = salt.config.DEFAULT_MASTER_OPTS
                conf_path = '/'.join(self.opts['conf_file'].split('/')[:-1])
                mopts_.update(
                    salt.config.master_config(
                        os.path.join(conf_path,
                                     'master')
                    )
                )
                client = salt.client.get_local_client(mopts=mopts_)
                ret = client.cmd(
                    vm_['name'],
                    'saltutil.sync_{0}'.format(self.opts['sync_after_install']),
                    timeout=self.opts['timeout']
                )
                if ret:
                    log.info(
                        six.u('Synchronized the following dynamic modules: '
                              ' {0}').format(ret)
                    )
                    break
    except KeyError as exc:
        log.exception(
            'Failed to create VM %s. Configuration value %s needs '
            'to be set', vm_['name'], exc
        )
    # If it's a map then we need to respect the 'requires'
    # so we do it later
    try:
        opt_map = self.opts['map']
    except KeyError:
        opt_map = False
    if self.opts['parallel'] and self.opts['start_action'] and not opt_map:
        log.info('Running %s on %s', self.opts['start_action'], vm_['name'])
        client = salt.client.get_local_client(mopts=self.opts)
        action_out = client.cmd(
            vm_['name'],
            self.opts['start_action'],
            timeout=self.opts['timeout'] * 60
        )
        output['ret'] = action_out
    return output
@staticmethod
def vm_config(name, main, provider, profile, overrides):
    '''
    Create vm config.

    :param str name: The name of the vm
    :param dict main: The main cloud config
    :param dict provider: The provider config
    :param dict profile: The profile config
    :param dict overrides: The vm's config overrides
    '''
    # Layer the configuration sources from most generic to most
    # specific: main < provider < profile < overrides.
    config = main.copy()
    for layer in (provider, profile):
        config = salt.utils.dictupdate.update(config, layer)
    config.update(overrides)
    config['name'] = name
    return config
def extras(self, extra_):
    '''
    Extra actions
    '''
    result = {}
    alias, driver = extra_['provider'].split(':')
    fun_name = '{0}.{1}'.format(driver, extra_['action'])
    if fun_name not in self.clouds:
        # The driver does not expose the requested action.
        log.error(
            'Creating \'%s\' using \'%s\' as the provider '
            'cannot complete since \'%s\' is not available',
            extra_['name'], extra_['provider'], driver
        )
        return
    try:
        with salt.utils.context.func_globals_inject(
            self.clouds[fun_name],
            __active_provider_name__=extra_['provider']
        ):
            result = self.clouds[fun_name](**extra_)
    except KeyError as exc:
        log.exception(
            'Failed to perform %s.%s on %s. '
            'Configuration value %s needs to be set',
            extra_['provider'], extra_['action'], extra_['name'], exc
        )
    return result
def run_profile(self, profile, names, vm_overrides=None):
    '''
    Parse over the options passed on the command line and determine how to
    handle them

    :param profile: name of the profile (must exist in ``opts['profiles']``)
    :param names: iterable of VM names to create from the profile
    :param vm_overrides: optional per-VM config overrides applied last
    :return: ``{name: result}``; errors are reported as
        ``{name: {'Error': msg}}``
    '''
    if profile not in self.opts['profiles']:
        msg = 'Profile {0} is not defined'.format(profile)
        log.error(msg)
        return {'Error': msg}
    ret = {}
    if not vm_overrides:
        vm_overrides = {}
    # Load the raw main cloud config; any failure degrades to an empty
    # dict rather than aborting the run.
    try:
        with salt.utils.files.fopen(self.opts['conf_file'], 'r') as mcc:
            main_cloud_config = salt.utils.yaml.safe_load(mcc)
        if not main_cloud_config:
            main_cloud_config = {}
    except KeyError:
        main_cloud_config = {}
    except IOError:
        main_cloud_config = {}
    if main_cloud_config is None:
        main_cloud_config = {}
    mapped_providers = self.map_providers_parallel()
    profile_details = self.opts['profiles'][profile]
    # Flatten the provider map into {vm_name: details} to detect
    # already-existing instances below.
    vms = {}
    for prov, val in six.iteritems(mapped_providers):
        prov_name = next(iter(val))
        for node in mapped_providers[prov][prov_name]:
            vms[node] = mapped_providers[prov][prov_name][node]
            vms[node]['provider'] = prov
            vms[node]['driver'] = prov_name
    alias, driver = profile_details['provider'].split(':')
    provider_details = self.opts['providers'][alias][driver].copy()
    del provider_details['profiles']
    for name in names:
        if name in vms:
            prov = vms[name]['provider']
            driv = vms[name]['driver']
            msg = '{0} already exists under {1}:{2}'.format(
                name, prov, driv
            )
            log.error(msg)
            ret[name] = {'Error': msg}
            continue
        vm_ = self.vm_config(
            name,
            main_cloud_config,
            provider_details,
            profile_details,
            vm_overrides,
        )
        if self.opts['parallel']:
            process = multiprocessing.Process(
                target=self.create,
                args=(vm_,)
            )
            process.start()
            ret[name] = {
                'Provisioning': 'VM being provisioned in parallel. '
                                'PID: {0}'.format(process.pid)
            }
            continue
        try:
            # No need to inject __active_provider_name__ into the context
            # here because self.create takes care of that
            ret[name] = self.create(vm_)
            if not ret[name]:
                ret[name] = {'Error': 'Failed to deploy VM'}
                if len(names) == 1:
                    raise SaltCloudSystemExit('Failed to deploy VM')
                continue
            if self.opts.get('show_deploy_args', False) is False:
                ret[name].pop('deploy_kwargs', None)
        except (SaltCloudSystemExit, SaltCloudConfigError) as exc:
            # For a single VM propagate the failure; for many, record it
            # and keep creating the rest.
            if len(names) == 1:
                raise
            ret[name] = {'Error': str(exc)}
    return ret
def do_action(self, names, kwargs):
    '''
    Perform an action on a VM which may be specific to this cloud provider

    :param names: iterable of VM names (or provider IDs) to act on
    :param kwargs: extra keyword data passed to the driver action
    :return: ``{alias: {driver: {name: result}}}`` plus bookkeeping
        entries (``'Invalid Actions'``, ``'Not Found'``,
        ``'Not Actioned/Not Running'``) where applicable
    '''
    ret = {}
    invalid_functions = {}
    names = set(names)
    for alias, drivers in six.iteritems(self.map_providers_parallel()):
        if not names:
            break
        for driver, vms in six.iteritems(drivers):
            if not names:
                break
            valid_function = True
            fun = '{0}.{1}'.format(driver, self.opts['action'])
            if fun not in self.clouds:
                log.info('\'%s()\' is not available. Not actioning...', fun)
                valid_function = False
            for vm_name, vm_details in six.iteritems(vms):
                if not names:
                    break
                if vm_name not in names:
                    # Fall back to matching on the provider-side 'id'.
                    if not isinstance(vm_details, dict):
                        vm_details = {}
                    if 'id' in vm_details and vm_details['id'] in names:
                        vm_name = vm_details['id']
                    else:
                        log.debug(
                            'vm:%s in provider:%s is not in name '
                            'list:\'%s\'', vm_name, driver, names
                        )
                        continue
                # Build the dictionary of invalid functions with their associated VMs.
                if valid_function is False:
                    if invalid_functions.get(fun) is None:
                        invalid_functions.update({fun: []})
                    invalid_functions[fun].append(vm_name)
                    continue
                with salt.utils.context.func_globals_inject(
                    self.clouds[fun],
                    __active_provider_name__=':'.join([alias, driver])
                ):
                    if alias not in ret:
                        ret[alias] = {}
                    if driver not in ret[alias]:
                        ret[alias][driver] = {}
                    # Clean kwargs of "__pub_*" data before running the cloud action call.
                    # Prevents calling positional "kwarg" arg before "call" when no kwarg
                    # argument is present in the cloud driver function's arg spec.
                    kwargs = salt.utils.args.clean_kwargs(**kwargs)
                    if kwargs:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, kwargs, call='action'
                        )
                    else:
                        ret[alias][driver][vm_name] = self.clouds[fun](
                            vm_name, call='action'
                        )
                    names.remove(vm_name)
    # Set the return information for the VMs listed in the invalid_functions dict.
    missing_vms = set()
    if invalid_functions:
        ret['Invalid Actions'] = invalid_functions
        invalid_func_vms = set()
        for key, val in six.iteritems(invalid_functions):
            invalid_func_vms = invalid_func_vms.union(set(val))
        # Find the VMs that are in names, but not in set of invalid functions.
        missing_vms = names.difference(invalid_func_vms)
        if missing_vms:
            ret['Not Found'] = list(missing_vms)
            ret['Not Actioned/Not Running'] = list(names)
    if not names:
        return ret
    # Don't return missing VM information for invalid functions until after we've had a
    # Chance to return successful actions. If a function is valid for one driver, but
    # Not another, we want to make sure the successful action is returned properly.
    if missing_vms:
        return ret
    # If we reach this point, the Not Actioned and Not Found lists will be the same,
    # But we want to list both for clarity/consistency with the invalid functions lists.
    ret['Not Actioned/Not Running'] = list(names)
    ret['Not Found'] = list(names)
    return ret
def do_function(self, prov, func, kwargs):
    '''
    Perform a function against a cloud provider

    :param prov: provider selector, resolved through ``lookup_providers``;
        must match exactly one ``alias:driver`` pair
    :param func: name of the driver function to call
    :param kwargs: optional keyword data forwarded to the function
    :return: ``{alias: {driver: result}}``
    :raises SaltCloudSystemExit: if more than one provider matches or the
        driver does not define ``func``
    '''
    matches = self.lookup_providers(prov)
    if len(matches) > 1:
        raise SaltCloudSystemExit(
            'More than one results matched \'{0}\'. Please specify '
            'one of: {1}'.format(
                prov,
                ', '.join([
                    '{0}:{1}'.format(alias, driver) for
                    (alias, driver) in matches
                ])
            )
        )
    alias, driver = matches.pop()
    fun = '{0}.{1}'.format(driver, func)
    if fun not in self.clouds:
        raise SaltCloudSystemExit(
            'The \'{0}\' cloud provider alias, for the \'{1}\' driver, does '
            'not define the function \'{2}\''.format(alias, driver, func)
        )
    log.debug(
        'Trying to execute \'%s\' with the following kwargs: %s',
        fun, kwargs
    )
    with salt.utils.context.func_globals_inject(
        self.clouds[fun],
        __active_provider_name__=':'.join([alias, driver])
    ):
        if kwargs:
            return {
                alias: {
                    driver: self.clouds[fun](
                        call='function', kwargs=kwargs
                    )
                }
            }
        return {
            alias: {
                driver: self.clouds[fun](call='function')
            }
        }
|
saltstack/salt
|
salt/cloud/__init__.py
|
Map.read
|
python
|
def read(self):
'''
Read in the specified map and return the map structure
'''
map_ = None
if self.opts.get('map', None) is None:
if self.opts.get('map_data', None) is None:
if self.opts.get('map_pillar', None) is None:
pass
elif self.opts.get('map_pillar') not in self.opts.get('maps'):
log.error(
'The specified map not found in pillar at '
'\'cloud:maps:%s\'', self.opts['map_pillar']
)
raise SaltCloudNotFound()
else:
# 'map_pillar' is provided, try to use it
map_ = self.opts['maps'][self.opts.get('map_pillar')]
else:
# 'map_data' is provided, try to use it
map_ = self.opts['map_data']
else:
# 'map' is provided, try to use it
local_minion_opts = copy.deepcopy(self.opts)
local_minion_opts['file_client'] = 'local'
self.minion = salt.minion.MasterMinion(local_minion_opts)
if not os.path.isfile(self.opts['map']):
if not (self.opts['map']).startswith('salt://'):
log.error(
'The specified map file does not exist: \'%s\'',
self.opts['map']
)
raise SaltCloudNotFound()
if (self.opts['map']).startswith('salt://'):
cached_map = self.minion.functions['cp.cache_file'](self.opts['map'])
else:
cached_map = self.opts['map']
try:
renderer = self.opts.get('renderer', 'jinja|yaml')
rend = salt.loader.render(self.opts, {})
blacklist = self.opts.get('renderer_blacklist')
whitelist = self.opts.get('renderer_whitelist')
map_ = compile_template(
cached_map, rend, renderer, blacklist, whitelist
)
except Exception as exc:
log.error(
'Rendering map %s failed, render error:\n%s',
self.opts['map'], exc,
exc_info_on_loglevel=logging.DEBUG
)
return {}
if 'include' in map_:
map_ = salt.config.include_config(
map_, self.opts['map'], verbose=False
)
if not map_:
return {}
# Create expected data format if needed
for profile, mapped in six.iteritems(map_.copy()):
if isinstance(mapped, (list, tuple)):
entries = {}
for mapping in mapped:
if isinstance(mapping, six.string_types):
# Foo:
# - bar1
# - bar2
mapping = {mapping: None}
for name, overrides in six.iteritems(mapping):
if overrides is None or isinstance(overrides, bool):
# Foo:
# - bar1:
# - bar2:
overrides = {}
try:
overrides.setdefault('name', name)
except AttributeError:
log.error(
'Cannot use \'name\' as a minion id in a cloud map as it '
'is a reserved word. Please change \'name\' to a different '
'minion id reference.'
)
return {}
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, dict):
# Convert the dictionary mapping to a list of dictionaries
# Foo:
# bar1:
# grains:
# foo: bar
# bar2:
# grains:
# foo: bar
entries = {}
for name, overrides in six.iteritems(mapped):
overrides.setdefault('name', name)
entries[name] = overrides
map_[profile] = entries
continue
if isinstance(mapped, six.string_types):
# If it's a single string entry, let's make iterable because of
# the next step
mapped = [mapped]
map_[profile] = {}
for name in mapped:
map_[profile][name] = {'name': name}
return map_
|
Read in the specified map and return the map structure
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1742-L1857
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def compile_template(template,\n renderers,\n default,\n blacklist,\n whitelist,\n saltenv='base',\n sls='',\n input_data='',\n **kwargs):\n '''\n Take the path to a template and return the high data structure\n derived from the template.\n\n Helpers:\n\n :param mask_value:\n Mask value for debugging purposes (prevent sensitive information etc)\n example: \"mask_value=\"pass*\". All \"passwd\", \"password\", \"pass\" will\n be masked (as text).\n '''\n\n # if any error occurs, we return an empty dictionary\n ret = {}\n\n log.debug('compile template: %s', template)\n\n if 'env' in kwargs:\n # \"env\" is not supported; Use \"saltenv\".\n kwargs.pop('env')\n\n if template != ':string:':\n # Template was specified incorrectly\n if not isinstance(template, six.string_types):\n log.error('Template was specified incorrectly: %s', template)\n return ret\n # Template does not exist\n if not os.path.isfile(template):\n log.error('Template does not exist: %s', template)\n return ret\n # Template is an empty file\n if salt.utils.files.is_empty(template):\n log.debug('Template is an empty file: %s', template)\n return ret\n\n with codecs.open(template, encoding=SLS_ENCODING) as ifile:\n # data input to the first render function in the pipe\n input_data = ifile.read()\n if not input_data.strip():\n # Template is nothing but whitespace\n log.error('Template is nothing but whitespace: %s', template)\n return ret\n\n # Get the list of render funcs in the render pipe line.\n render_pipe = template_shebang(template, renderers, default, blacklist, whitelist, input_data)\n\n windows_newline = '\\r\\n' in input_data\n\n input_data = StringIO(input_data)\n for render, argline in render_pipe:\n if salt.utils.stringio.is_readable(input_data):\n input_data.seek(0) # pylint: disable=no-member\n render_kwargs = dict(renderers=renderers, tmplpath=template)\n render_kwargs.update(kwargs)\n if argline:\n render_kwargs['argline'] = argline\n start = time.time()\n ret = render(input_data, 
saltenv, sls, **render_kwargs)\n log.profile(\n 'Time (in seconds) to render \\'%s\\' using \\'%s\\' renderer: %s',\n template,\n render.__module__.split('.')[-1],\n time.time() - start\n )\n if ret is None:\n # The file is empty or is being written elsewhere\n time.sleep(0.01)\n ret = render(input_data, saltenv, sls, **render_kwargs)\n input_data = ret\n if log.isEnabledFor(logging.GARBAGE): # pylint: disable=no-member\n # If ret is not a StringIO (which means it was rendered using\n # yaml, mako, or another engine which renders to a data\n # structure) we don't want to log this.\n if salt.utils.stringio.is_readable(ret):\n log.debug('Rendered data from file: %s:\\n%s', template,\n salt.utils.sanitizers.mask_args_value(salt.utils.data.decode(ret.read()),\n kwargs.get('mask_value'))) # pylint: disable=no-member\n ret.seek(0) # pylint: disable=no-member\n\n # Preserve newlines from original template\n if windows_newline:\n if salt.utils.stringio.is_readable(ret):\n is_stringio = True\n contents = ret.read()\n else:\n is_stringio = False\n contents = ret\n\n if isinstance(contents, six.string_types):\n if '\\r\\n' not in contents:\n contents = contents.replace('\\n', '\\r\\n')\n ret = StringIO(contents) if is_stringio else contents\n else:\n if is_stringio:\n ret.seek(0)\n return ret\n",
"def include_config(include, orig_path, verbose, exit_on_config_errors=False):\n '''\n Parses extra configuration file(s) specified in an include list in the\n main config file.\n '''\n # Protect against empty option\n if not include:\n return {}\n\n if orig_path is None:\n # When the passed path is None, we just want the configuration\n # defaults, not actually loading the whole configuration.\n return {}\n\n if isinstance(include, six.string_types):\n include = [include]\n\n configuration = {}\n for path in include:\n # Allow for includes like ~/foo\n path = os.path.expanduser(path)\n if not os.path.isabs(path):\n path = os.path.join(os.path.dirname(orig_path), path)\n\n # Catch situation where user typos path in configuration; also warns\n # for empty include directory (which might be by design)\n glob_matches = glob.glob(path)\n if not glob_matches:\n if verbose:\n log.warning(\n 'Warning parsing configuration file: \"include\" path/glob '\n \"'%s' matches no files\", path\n )\n\n for fn_ in sorted(glob_matches):\n log.debug('Including configuration from \\'%s\\'', fn_)\n try:\n opts = _read_conf_file(fn_)\n except salt.exceptions.SaltConfigurationError as error:\n log.error(error)\n if exit_on_config_errors:\n sys.exit(salt.defaults.exitcodes.EX_GENERIC)\n else:\n # Initialize default config if we wish to skip config errors\n opts = {}\n schedule = opts.get('schedule', {})\n if schedule and 'schedule' in configuration:\n configuration['schedule'].update(schedule)\n include = opts.get('include', [])\n if include:\n opts.update(include_config(include, fn_, verbose))\n\n salt.utils.dictupdate.update(configuration, opts, True, True)\n\n return configuration\n"
] |
class Map(Cloud):
'''
Create a VM stateful map execution object
'''
def __init__(self, opts):
    '''
    Build the stateful map object and render the configured map source.
    '''
    Cloud.__init__(self, opts)
    # Render the map once up front; later operations work from this
    # cached structure.
    self.rendered_map = self.read()
def interpolated_map(self, query='list_nodes', cached=False):
    '''
    Merge the rendered map with the providers' view of running VMs.

    :param query: provider query function used for the lookup
    :param cached: if True, allow a cached provider query
    :return: ``{alias: {driver: {vm_name: details_or_'Absent'}}}``;
        profiles with no matching provider are collected under an
        ``'Errors'`` key
    '''
    rendered_map = self.read().copy()
    interpolated_map = {}
    for profile, mapped_vms in six.iteritems(rendered_map):
        names = set(mapped_vms)
        if profile not in self.opts['profiles']:
            if 'Errors' not in interpolated_map:
                interpolated_map['Errors'] = {}
            msg = (
                'No provider for the mapped \'{0}\' profile was found. '
                'Skipped VMS: {1}'.format(
                    profile, ', '.join(names)
                )
            )
            log.info(msg)
            interpolated_map['Errors'][profile] = msg
            continue
        matching = self.get_running_by_names(names, query, cached)
        for alias, drivers in six.iteritems(matching):
            for driver, vms in six.iteritems(drivers):
                for vm_name, vm_details in six.iteritems(vms):
                    if alias not in interpolated_map:
                        interpolated_map[alias] = {}
                    if driver not in interpolated_map[alias]:
                        interpolated_map[alias][driver] = {}
                    interpolated_map[alias][driver][vm_name] = vm_details
                    try:
                        names.remove(vm_name)
                    except KeyError:
                        # If it's not there, then our job is already done
                        pass
        if not names:
            continue
        # Any mapped name not found running is recorded as 'Absent'
        # under the profile's configured provider.
        profile_details = self.opts['profiles'][profile]
        alias, driver = profile_details['provider'].split(':')
        for vm_name in names:
            if alias not in interpolated_map:
                interpolated_map[alias] = {}
            if driver not in interpolated_map[alias]:
                interpolated_map[alias][driver] = {}
            interpolated_map[alias][driver][vm_name] = 'Absent'
    return interpolated_map
def delete_map(self, query=None):
    '''
    Return the interpolated map with every VM reported as ``'Absent'``
    removed, dropping any driver or alias entries left empty.
    '''
    pruned = {}
    for alias, drivers in six.iteritems(self.interpolated_map(query=query)):
        for driver, vms in six.iteritems(drivers):
            present = dict(
                (vm_name, vm_details)
                for vm_name, vm_details in six.iteritems(vms)
                if vm_details != 'Absent'
            )
            if present:
                pruned.setdefault(alias, {})[driver] = present
    return pruned
def get_vmnames_by_action(self, action):
    '''
    Return the mapped VM names whose current state makes them eligible
    for ``action``.

    :param action: one of ``'start'``, ``'stop'`` or ``'reboot'``
    :raises SaltCloudException: for any other action name
    '''
    query_map = self.interpolated_map("list_nodes")
    # Which reported states each supported action applies to.
    matching_states = {
        'start': ['stopped'],
        'stop': ['running', 'active'],
        'reboot': ['running', 'active'],
    }
    vm_names = []
    for alias, drivers in six.iteritems(query_map):
        for driver, vms in six.iteritems(drivers):
            for vm_name, vm_details in six.iteritems(vms):
                # Only certain actions are support in to use in this case. Those actions are the
                # "Global" salt-cloud actions defined in the "matching_states" dictionary above.
                # If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
                try:
                    state_action = matching_states[action]
                except KeyError:
                    log.error(
                        'The use of \'%s\' as an action is not supported '
                        'in this context. Only \'start\', \'stop\', and '
                        '\'reboot\' are supported options.', action
                    )
                    raise SaltCloudException()
                if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
                    vm_names.append(vm_name)
    return vm_names
def _has_loop(self, dmap, seen=None, val=None):
    '''
    Depth-first check for a cycle in the map's 'requires' dependencies.

    Called without ``seen``/``val`` it walks every entry under
    ``dmap['create']``; recursive calls carry the path walked so far in
    ``seen`` and the current node in ``val``.

    :return: True if any dependency chain revisits a node
    '''
    if seen is None:
        # Top-level call: start a fresh DFS from each machine's
        # 'requires' list.
        for values in six.itervalues(dmap['create']):
            seen = []
            try:
                machines = values['requires']
            except KeyError:
                machines = []
            for machine in machines:
                # Pass a copy of the path so sibling branches don't
                # pollute each other.
                if self._has_loop(dmap, seen=list(seen), val=machine):
                    return True
    else:
        if val in seen:
            # Current node already appears on this path: cycle found.
            return True
        seen.append(val)
        try:
            machines = dmap['create'][val]['requires']
        except KeyError:
            machines = []
        for machine in machines:
            if self._has_loop(dmap, seen=list(seen), val=machine):
                return True
    return False
def _calcdep(self, dmap, machine, data, level):
    '''
    Recursively compute the dependency depth of ``machine``.

    A machine with no 'requires' sits at ``level``; otherwise its level
    is one more than the deepest of its dependencies.

    :raises SaltCloudException: when a required machine is in neither
        ``dmap['create']`` nor ``dmap['existing']``
    '''
    try:
        deplist = data['requires']
    except KeyError:
        return level
    # NOTE(review): if 'requires' is present but an empty list,
    # ``levels`` stays empty and ``max(levels)`` below raises
    # ValueError — confirm callers never produce an empty list here.
    levels = []
    for name in deplist:
        # A dependency may refer to a machine being created or one that
        # already exists.
        try:
            data = dmap['create'][name]
        except KeyError:
            try:
                data = dmap['existing'][name]
            except KeyError:
                msg = 'Missing dependency in cloud map'
                log.error(msg)
                raise SaltCloudException(msg)
        levels.append(self._calcdep(dmap, name, data, level))
    level = max(levels) + 1
    return level
def map_data(self, cached=False):
    '''
    Create a data map of what to execute on.

    Returns a dict whose ``create`` key maps each VM name to its fully
    merged provider + profile + map-override configuration.  The result
    may also contain:

    - ``existing``: map entries that already exist on a provider (moved
      out of ``create``)
    - ``errors``: per-profile messages for map entries whose profile is
      not defined
    - ``destroy``: with ``--hard``, the set of ``(alias, driver, name)``
      tuples that exist on the providers but are not defined in the map

    cached
        If True, reuse a previously cached provider query instead of
        re-querying every provider.
    '''
    ret = {'create': {}}
    pmap = self.map_providers_parallel(cached=cached)
    # (alias, driver, name) tuples found on the providers / in the map.
    exist = set()
    defined = set()
    # Work on a copy so the per-node override merging below cannot mutate
    # the rendered map held on the instance.
    rendered_map = copy.deepcopy(self.rendered_map)
    for profile_name, nodes in six.iteritems(rendered_map):
        if profile_name not in self.opts['profiles']:
            # Unknown profile: record the error and skip all of its nodes.
            msg = (
                'The required profile, \'{0}\', defined in the map '
                'does not exist. The defined nodes, {1}, will not '
                'be created.'.format(
                    profile_name,
                    ', '.join('\'{0}\''.format(node) for node in nodes)
                )
            )
            log.error(msg)
            if 'errors' not in ret:
                ret['errors'] = {}
            ret['errors'][profile_name] = msg
            continue
        profile_data = self.opts['profiles'].get(profile_name)
        for nodename, overrides in six.iteritems(nodes):
            # Get associated provider data, in case something like size
            # or image is specified in the provider file. See issue #32510.
            if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
                alias, driver = overrides.get('provider').split(':')
            else:
                alias, driver = profile_data.get('provider').split(':')
            provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
            del provider_details['profiles']
            # Update the provider details information with profile data
            # Profile data and node overrides should override provider data, if defined.
            # This keeps map file data definitions consistent with -p usage.
            salt.utils.dictupdate.update(provider_details, profile_data)
            nodedata = copy.deepcopy(provider_details)
            # Update profile data with the map overrides
            for setting in ('grains', 'master', 'minion', 'volumes',
                            'requires'):
                # Accept the deprecated 'map_<setting>' spelling but warn
                # and translate it to the plain '<setting>' key.
                deprecated = 'map_{0}'.format(setting)
                if deprecated in overrides:
                    log.warning(
                        'The use of \'%s\' on the \'%s\' mapping has '
                        'been deprecated. The preferred way now is to '
                        'just define \'%s\'. For now, salt-cloud will do '
                        'the proper thing and convert the deprecated '
                        'mapping into the preferred one.',
                        deprecated, nodename, setting
                    )
                    overrides[setting] = overrides.pop(deprecated)
            # merge minion grains from map file
            if 'minion' in overrides and \
                    'minion' in nodedata and \
                    'grains' in overrides['minion'] and \
                    'grains' in nodedata['minion']:
                nodedata['minion']['grains'].update(
                    overrides['minion']['grains']
                )
                # Drop the merged grains so the dictupdate below does not
                # overwrite the merged result wholesale.
                del overrides['minion']['grains']
                # remove minion key if now is empty dict
                if not overrides['minion']:
                    del overrides['minion']
            nodedata = salt.utils.dictupdate.update(nodedata, overrides)
            # Add the computed information to the return data
            ret['create'][nodename] = nodedata
            # Add the node name to the defined set
            alias, driver = nodedata['provider'].split(':')
            defined.add((alias, driver, nodename))

    def get_matching_by_name(name):
        # Map driver -> state for every running VM with this exact name,
        # across all configured providers (first driver occurrence wins).
        matches = {}
        for alias, drivers in six.iteritems(pmap):
            for driver, vms in six.iteritems(drivers):
                for vm_name, details in six.iteritems(vms):
                    if vm_name == name and driver not in matches:
                        matches[driver] = details['state']
        return matches

    for alias, drivers in six.iteritems(pmap):
        for driver, vms in six.iteritems(drivers):
            for name, details in six.iteritems(vms):
                exist.add((alias, driver, name))
                if name not in ret['create']:
                    continue
                # The machine is set to be created. Does it already exist?
                matching = get_matching_by_name(name)
                if not matching:
                    continue
                # A machine by the same name exists
                # NOTE(review): the loop variable 'item' is never used;
                # the body only runs once before popping the name — looks
                # intentional-but-odd, confirm before simplifying.
                for item in matching:
                    if name not in ret['create']:
                        # Machine already removed
                        break
                    log.warning("'%s' already exists, removing from "
                                'the create map.', name)
                    if 'existing' not in ret:
                        ret['existing'] = {}
                    ret['existing'][name] = ret['create'].pop(name)
    if 'hard' in self.opts and self.opts['hard']:
        if self.opts['enable_hard_maps'] is False:
            raise SaltCloudSystemExit(
                'The --hard map can be extremely dangerous to use, '
                'and therefore must explicitly be enabled in the main '
                'configuration file, by setting \'enable_hard_maps\' '
                'to True'
            )
        # Hard maps are enabled, Look for the items to delete.
        ret['destroy'] = exist.difference(defined)
    return ret
def run_map(self, dmap):
    '''
    Execute the contents of the VM map.

    *dmap* is the structure produced by :meth:`map_data` (``create`` /
    ``existing`` / optional ``destroy``).  The method:

    1. refuses maps with dependency loops,
    2. computes an execution "level" for every node from ``requires``,
    3. optionally deploys (or locates) a ``make_master`` node first and
       pre-seeds it with the other minions' keys,
    4. creates the remaining nodes in dependency order (optionally in
       parallel), and
    5. destroys anything listed under ``destroy``.

    Returns a dict of per-VM creation/destruction results.
    '''
    if self._has_loop(dmap):
        msg = 'Uh-oh, that cloud map has a dependency loop!'
        log.error(msg)
        raise SaltCloudException(msg)
    # Go through the create list and calc dependencies
    for key, val in six.iteritems(dmap['create']):
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['create'][key]['level'] = level
    try:
        existing_list = six.iteritems(dmap['existing'])
    except KeyError:
        existing_list = six.iteritems({})
    for key, val in existing_list:
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['existing'][key]['level'] = level
    # Now sort the create list based on dependencies
    create_list = sorted(six.iteritems(dmap['create']), key=lambda x: x[1]['level'])
    # 'full_map' covers create + existing so a pre-existing node can still
    # be picked as the master below.
    full_map = dmap['create'].copy()
    if 'existing' in dmap:
        full_map.update(dmap['existing'])
    possible_master_list = sorted(six.iteritems(full_map), key=lambda x: x[1]['level'])
    output = {}
    if self.opts['parallel']:
        parallel_data = []
    master_name = None
    master_minion_name = None
    master_host = None
    master_finger = None
    # Last profile with make_master=True wins as the designated master.
    for name, profile in possible_master_list:
        if profile.get('make_master', False) is True:
            master_name = name
            master_profile = profile
    if master_name:
        # If the master already exists, get the host
        if master_name not in dmap['create']:
            master_host = self.client.query()
            # Walk down alias -> driver into the query result.
            for provider_part in master_profile['provider'].split(':'):
                master_host = master_host[provider_part]
            master_host = master_host[master_name][master_profile.get('ssh_interface', 'public_ips')]
            if not master_host:
                raise SaltCloudSystemExit(
                    'Could not get the hostname of master {}.'.format(master_name)
                )
        # Otherwise, deploy it as a new master
        else:
            master_minion_name = master_name
            log.debug('Creating new master \'%s\'', master_name)
            if salt.config.get_cloud_config_value(
                'deploy',
                master_profile,
                self.opts
            ) is False:
                raise SaltCloudSystemExit(
                    'Cannot proceed with \'make_master\' when salt deployment '
                    'is disabled(ex: --no-deploy).'
                )
            # Generate the master keys
            log.debug('Generating master keys for \'%s\'', master_profile['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    master_profile,
                    self.opts
                )
            )
            master_profile['master_pub'] = pub
            master_profile['master_pem'] = priv
            # Generate the fingerprint of the master pubkey in order to
            # mitigate man-in-the-middle attacks
            master_temp_pub = salt.utils.files.mkstemp()
            with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
                mtp.write(pub)
            master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
            os.unlink(master_temp_pub)
            if master_profile.get('make_minion', True) is True:
                master_profile.setdefault('minion', {})
                if 'id' in master_profile['minion']:
                    master_minion_name = master_profile['minion']['id']
                # Set this minion's master as local if the user has not set it
                if 'master' not in master_profile['minion']:
                    master_profile['minion']['master'] = '127.0.0.1'
                    if master_finger is not None:
                        master_profile['master_finger'] = master_finger
            # Generate the minion keys to pre-seed the master:
            for name, profile in create_list:
                make_minion = salt.config.get_cloud_config_value(
                    'make_minion', profile, self.opts, default=True
                )
                if make_minion is False:
                    continue
                log.debug('Generating minion keys for \'%s\'', profile['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        profile,
                        self.opts
                    )
                )
                profile['pub_key'] = pub
                profile['priv_key'] = priv
                # Store the minion's public key in order to be pre-seeded in
                # the master
                master_profile.setdefault('preseed_minion_keys', {})
                master_profile['preseed_minion_keys'].update({name: pub})
            local_master = False
            # NOTE(review): if make_minion is False, 'minion' may be
            # missing from master_profile and this lookup would raise
            # KeyError — confirm whether that combination is reachable.
            if master_profile['minion'].get('local_master', False) and \
                    master_profile['minion'].get('master', None) is not None:
                # The minion is explicitly defining a master and it's
                # explicitly saying it's the local one
                local_master = True
            out = self.create(master_profile, local_master=local_master)
            if not isinstance(out, dict):
                log.debug('Master creation details is not a dictionary: %s', out)
            elif 'Errors' in out:
                raise SaltCloudSystemExit(
                    'An error occurred while creating the master, not '
                    'continuing: {0}'.format(out['Errors'])
                )
            deploy_kwargs = (
                self.opts.get('show_deploy_args', False) is True and
                # Get the needed data
                out.get('deploy_kwargs', {}) or
                # Strip the deploy_kwargs from the returned data since we don't
                # want it shown in the console.
                out.pop('deploy_kwargs', {})
            )
            master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
            if master_host is None:
                raise SaltCloudSystemExit(
                    'Host for new master {0} was not found, '
                    'aborting map'.format(
                        master_name
                    )
                )
            output[master_name] = out
    else:
        log.debug('No make_master found in map')
        # Local master?
        # Generate the fingerprint of the master pubkey in order to
        # mitigate man-in-the-middle attacks
        master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
        if os.path.isfile(master_pub):
            master_finger = salt.utils.crypt.pem_finger(master_pub, sum_type=self.opts['hash_type'])
    opts = self.opts.copy()
    if self.opts['parallel']:
        # Force display_ssh_output to be False since the console will
        # need to be reset afterwards
        log.info(
            'Since parallel deployment is in use, ssh console output '
            'is disabled. All ssh output will be logged though'
        )
        opts['display_ssh_output'] = False
    local_master = master_name is None
    for name, profile in create_list:
        if name in (master_name, master_minion_name):
            # Already deployed, it's the master's minion
            continue
        if 'minion' in profile and profile['minion'].get('local_master', False) and \
                profile['minion'].get('master', None) is not None:
            # The minion is explicitly defining a master and it's
            # explicitly saying it's the local one
            local_master = True
        if master_finger is not None and local_master is False:
            profile['master_finger'] = master_finger
        if master_host is not None:
            # Point every other minion at the (new or existing) master.
            profile.setdefault('minion', {})
            profile['minion'].setdefault('master', master_host)
        if self.opts['parallel']:
            parallel_data.append({
                'opts': opts,
                'name': name,
                'profile': profile,
                'local_master': local_master
            })
            continue
        # Not deploying in parallel
        try:
            output[name] = self.create(
                profile, local_master=local_master
            )
            # NOTE(review): "'deploy_kwargs' in output" checks the
            # top-level results dict, not output[name]; this looks like
            # it was meant to be "in output[name]" — confirm upstream.
            if self.opts.get('show_deploy_args', False) is False \
                    and 'deploy_kwargs' in output \
                    and isinstance(output[name], dict):
                output[name].pop('deploy_kwargs', None)
        except SaltCloudException as exc:
            log.error(
                'Failed to deploy \'%s\'. Error: %s',
                name, exc, exc_info_on_loglevel=logging.DEBUG
            )
            output[name] = {'Error': str(exc)}
    for name in dmap.get('destroy', ()):
        output[name] = self.destroy(name)
    if self.opts['parallel'] and parallel_data:
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Cloud pool size: %s', pool_size)
        output_multip = enter_mainloop(
            _create_multiprocessing, parallel_data, pool_size=pool_size)
        # We have deployed in parallel, now do start action in
        # correct order based on dependencies.
        if self.opts['start_action']:
            actionlist = []
            grp = -1
            # Group the (level-computed) nodes into batches per level so
            # start_action runs on each dependency tier in order.
            for key, val in groupby(six.itervalues(dmap['create']), lambda x: x['level']):
                actionlist.append([])
                grp += 1
                for item in val:
                    actionlist[grp].append(item['name'])
            out = {}
            for group in actionlist:
                log.info('Running %s on %s', self.opts['start_action'], ', '.join(group))
                client = salt.client.get_local_client()
                out.update(client.cmd(
                    ','.join(group), self.opts['start_action'],
                    timeout=self.opts['timeout'] * 60, tgt_type='list'
                ))
            # Attach each start_action result to the matching VM output.
            for obj in output_multip:
                next(six.itervalues(obj))['ret'] = out[next(six.iterkeys(obj))]
                output.update(obj)
        else:
            for obj in output_multip:
                output.update(obj)
    return output
|
saltstack/salt
|
salt/cloud/__init__.py
|
Map.map_data
|
python
|
def map_data(self, cached=False):
'''
Create a data map of what to execute on
'''
ret = {'create': {}}
pmap = self.map_providers_parallel(cached=cached)
exist = set()
defined = set()
rendered_map = copy.deepcopy(self.rendered_map)
for profile_name, nodes in six.iteritems(rendered_map):
if profile_name not in self.opts['profiles']:
msg = (
'The required profile, \'{0}\', defined in the map '
'does not exist. The defined nodes, {1}, will not '
'be created.'.format(
profile_name,
', '.join('\'{0}\''.format(node) for node in nodes)
)
)
log.error(msg)
if 'errors' not in ret:
ret['errors'] = {}
ret['errors'][profile_name] = msg
continue
profile_data = self.opts['profiles'].get(profile_name)
for nodename, overrides in six.iteritems(nodes):
# Get associated provider data, in case something like size
# or image is specified in the provider file. See issue #32510.
if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
alias, driver = overrides.get('provider').split(':')
else:
alias, driver = profile_data.get('provider').split(':')
provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
del provider_details['profiles']
# Update the provider details information with profile data
# Profile data and node overrides should override provider data, if defined.
# This keeps map file data definitions consistent with -p usage.
salt.utils.dictupdate.update(provider_details, profile_data)
nodedata = copy.deepcopy(provider_details)
# Update profile data with the map overrides
for setting in ('grains', 'master', 'minion', 'volumes',
'requires'):
deprecated = 'map_{0}'.format(setting)
if deprecated in overrides:
log.warning(
'The use of \'%s\' on the \'%s\' mapping has '
'been deprecated. The preferred way now is to '
'just define \'%s\'. For now, salt-cloud will do '
'the proper thing and convert the deprecated '
'mapping into the preferred one.',
deprecated, nodename, setting
)
overrides[setting] = overrides.pop(deprecated)
# merge minion grains from map file
if 'minion' in overrides and \
'minion' in nodedata and \
'grains' in overrides['minion'] and \
'grains' in nodedata['minion']:
nodedata['minion']['grains'].update(
overrides['minion']['grains']
)
del overrides['minion']['grains']
# remove minion key if now is empty dict
if not overrides['minion']:
del overrides['minion']
nodedata = salt.utils.dictupdate.update(nodedata, overrides)
# Add the computed information to the return data
ret['create'][nodename] = nodedata
# Add the node name to the defined set
alias, driver = nodedata['provider'].split(':')
defined.add((alias, driver, nodename))
def get_matching_by_name(name):
matches = {}
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for vm_name, details in six.iteritems(vms):
if vm_name == name and driver not in matches:
matches[driver] = details['state']
return matches
for alias, drivers in six.iteritems(pmap):
for driver, vms in six.iteritems(drivers):
for name, details in six.iteritems(vms):
exist.add((alias, driver, name))
if name not in ret['create']:
continue
# The machine is set to be created. Does it already exist?
matching = get_matching_by_name(name)
if not matching:
continue
# A machine by the same name exists
for item in matching:
if name not in ret['create']:
# Machine already removed
break
log.warning("'%s' already exists, removing from "
'the create map.', name)
if 'existing' not in ret:
ret['existing'] = {}
ret['existing'][name] = ret['create'].pop(name)
if 'hard' in self.opts and self.opts['hard']:
if self.opts['enable_hard_maps'] is False:
raise SaltCloudSystemExit(
'The --hard map can be extremely dangerous to use, '
'and therefore must explicitly be enabled in the main '
'configuration file, by setting \'enable_hard_maps\' '
'to True'
)
# Hard maps are enabled, Look for the items to delete.
ret['destroy'] = exist.difference(defined)
return ret
|
Create a data map of what to execute on
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L1905-L2029
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n",
"def map_providers_parallel(self, query='list_nodes', cached=False):\n '''\n Return a mapping of what named VMs are running on what VM providers\n based on what providers are defined in the configuration and VMs\n\n Same as map_providers but query in parallel.\n '''\n if cached is True and query in self.__cached_provider_queries:\n return self.__cached_provider_queries[query]\n\n opts = self.opts.copy()\n multiprocessing_data = []\n\n # Optimize Providers\n opts['providers'] = self._optimize_providers(opts['providers'])\n for alias, drivers in six.iteritems(opts['providers']):\n # Make temp query for this driver to avoid overwrite next\n this_query = query\n for driver, details in six.iteritems(drivers):\n # If driver has function list_nodes_min, just replace it\n # with query param to check existing vms on this driver\n # for minimum information, Otherwise still use query param.\n if opts.get('selected_query_option') is None and '{0}.list_nodes_min'.format(driver) in self.clouds:\n this_query = 'list_nodes_min'\n\n fun = '{0}.{1}'.format(driver, this_query)\n if fun not in self.clouds:\n log.error('Public cloud provider %s is not available', driver)\n continue\n\n multiprocessing_data.append({\n 'fun': fun,\n 'opts': opts,\n 'query': this_query,\n 'alias': alias,\n 'driver': driver\n })\n output = {}\n if not multiprocessing_data:\n return output\n\n data_count = len(multiprocessing_data)\n pool = multiprocessing.Pool(data_count < 10 and data_count or 10,\n init_pool_worker)\n parallel_pmap = enter_mainloop(_run_parallel_map_providers_query,\n multiprocessing_data,\n pool=pool)\n for alias, driver, details in parallel_pmap:\n if not details:\n # There's no providers details?! Skip it!\n continue\n if alias not in output:\n output[alias] = {}\n output[alias][driver] = details\n\n self.__cached_provider_queries[query] = output\n return output\n",
"def get_matching_by_name(name):\n matches = {}\n for alias, drivers in six.iteritems(pmap):\n for driver, vms in six.iteritems(drivers):\n for vm_name, details in six.iteritems(vms):\n if vm_name == name and driver not in matches:\n matches[driver] = details['state']\n return matches\n"
] |
class Map(Cloud):
'''
Create a VM stateful map execution object
'''
    def __init__(self, opts):
        '''
        Initialize the Map runner on top of the generic Cloud machinery
        and render the configured map immediately.

        opts
            The salt-cloud configuration dictionary.
        '''
        Cloud.__init__(self, opts)
        # Render the map once up front; the other Map methods work from
        # this pre-rendered structure.
        self.rendered_map = self.read()
    def interpolated_map(self, query='list_nodes', cached=False):
        '''
        Cross-reference the rendered map against what is actually running.

        Returns ``{alias: {driver: {vm_name: details-or-'Absent'}}}``:
        running VMs carry the provider's query details, while mapped VMs
        not found on any provider are marked with the string ``'Absent'``.
        Map entries whose profile is unknown are collected under an
        ``'Errors'`` key instead.

        query
            Provider query function to use (default ``list_nodes``).
        cached
            If True, allow a cached provider query to be reused.
        '''
        rendered_map = self.read().copy()
        interpolated_map = {}
        for profile, mapped_vms in six.iteritems(rendered_map):
            # Names still in this set at the end were not found running.
            names = set(mapped_vms)
            if profile not in self.opts['profiles']:
                if 'Errors' not in interpolated_map:
                    interpolated_map['Errors'] = {}
                msg = (
                    'No provider for the mapped \'{0}\' profile was found. '
                    'Skipped VMS: {1}'.format(
                        profile, ', '.join(names)
                    )
                )
                log.info(msg)
                interpolated_map['Errors'][profile] = msg
                continue
            matching = self.get_running_by_names(names, query, cached)
            for alias, drivers in six.iteritems(matching):
                for driver, vms in six.iteritems(drivers):
                    for vm_name, vm_details in six.iteritems(vms):
                        if alias not in interpolated_map:
                            interpolated_map[alias] = {}
                        if driver not in interpolated_map[alias]:
                            interpolated_map[alias][driver] = {}
                        interpolated_map[alias][driver][vm_name] = vm_details
                        try:
                            names.remove(vm_name)
                        except KeyError:
                            # If it's not there, then our job is already done
                            pass
            if not names:
                continue
            # Whatever is left was not found running: record it as Absent
            # under the profile's own provider alias/driver.
            profile_details = self.opts['profiles'][profile]
            alias, driver = profile_details['provider'].split(':')
            for vm_name in names:
                if alias not in interpolated_map:
                    interpolated_map[alias] = {}
                if driver not in interpolated_map[alias]:
                    interpolated_map[alias][driver] = {}
                interpolated_map[alias][driver][vm_name] = 'Absent'
        return interpolated_map
def delete_map(self, query=None):
query_map = self.interpolated_map(query=query)
for alias, drivers in six.iteritems(query_map.copy()):
for driver, vms in six.iteritems(drivers.copy()):
for vm_name, vm_details in six.iteritems(vms.copy()):
if vm_details == 'Absent':
query_map[alias][driver].pop(vm_name)
if not query_map[alias][driver]:
query_map[alias].pop(driver)
if not query_map[alias]:
query_map.pop(alias)
return query_map
def get_vmnames_by_action(self, action):
query_map = self.interpolated_map("list_nodes")
matching_states = {
'start': ['stopped'],
'stop': ['running', 'active'],
'reboot': ['running', 'active'],
}
vm_names = []
for alias, drivers in six.iteritems(query_map):
for driver, vms in six.iteritems(drivers):
for vm_name, vm_details in six.iteritems(vms):
# Only certain actions are support in to use in this case. Those actions are the
# "Global" salt-cloud actions defined in the "matching_states" dictionary above.
# If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
try:
state_action = matching_states[action]
except KeyError:
log.error(
'The use of \'%s\' as an action is not supported '
'in this context. Only \'start\', \'stop\', and '
'\'reboot\' are supported options.', action
)
raise SaltCloudException()
if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
vm_names.append(vm_name)
return vm_names
    def read(self):
        '''
        Read in the specified map and return the map structure.

        The map may come from (in order of precedence) the ``map`` option
        (a local path or ``salt://`` URL rendered through the configured
        renderer), the ``map_data`` option (already-parsed data), or the
        ``map_pillar`` option (a named map under ``cloud:maps`` in pillar).

        The parsed map is normalized so every profile maps
        ``{vm_name: overrides}`` with ``overrides['name']`` always set.
        Returns ``{}`` when no map is configured, rendering fails, or a
        reserved key is misused.
        '''
        map_ = None
        if self.opts.get('map', None) is None:
            if self.opts.get('map_data', None) is None:
                if self.opts.get('map_pillar', None) is None:
                    pass
                elif self.opts.get('map_pillar') not in self.opts.get('maps'):
                    log.error(
                        'The specified map not found in pillar at '
                        '\'cloud:maps:%s\'', self.opts['map_pillar']
                    )
                    raise SaltCloudNotFound()
                else:
                    # 'map_pillar' is provided, try to use it
                    map_ = self.opts['maps'][self.opts.get('map_pillar')]
            else:
                # 'map_data' is provided, try to use it
                map_ = self.opts['map_data']
        else:
            # 'map' is provided, try to use it
            # A local MasterMinion is needed to fetch salt:// map files.
            local_minion_opts = copy.deepcopy(self.opts)
            local_minion_opts['file_client'] = 'local'
            self.minion = salt.minion.MasterMinion(local_minion_opts)
            if not os.path.isfile(self.opts['map']):
                if not (self.opts['map']).startswith('salt://'):
                    log.error(
                        'The specified map file does not exist: \'%s\'',
                        self.opts['map']
                    )
                    raise SaltCloudNotFound()
            if (self.opts['map']).startswith('salt://'):
                cached_map = self.minion.functions['cp.cache_file'](self.opts['map'])
            else:
                cached_map = self.opts['map']
            try:
                renderer = self.opts.get('renderer', 'jinja|yaml')
                rend = salt.loader.render(self.opts, {})
                blacklist = self.opts.get('renderer_blacklist')
                whitelist = self.opts.get('renderer_whitelist')
                map_ = compile_template(
                    cached_map, rend, renderer, blacklist, whitelist
                )
            except Exception as exc:
                log.error(
                    'Rendering map %s failed, render error:\n%s',
                    self.opts['map'], exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                return {}
            # Pull in any 'include'-ed map files relative to this one.
            if 'include' in map_:
                map_ = salt.config.include_config(
                    map_, self.opts['map'], verbose=False
                )
        if not map_:
            return {}
        # Create expected data format if needed
        for profile, mapped in six.iteritems(map_.copy()):
            if isinstance(mapped, (list, tuple)):
                entries = {}
                for mapping in mapped:
                    if isinstance(mapping, six.string_types):
                        # Foo:
                        #   - bar1
                        #   - bar2
                        mapping = {mapping: None}
                    for name, overrides in six.iteritems(mapping):
                        if overrides is None or isinstance(overrides, bool):
                            # Foo:
                            #   - bar1:
                            #   - bar2:
                            overrides = {}
                        try:
                            overrides.setdefault('name', name)
                        except AttributeError:
                            # A VM literally called 'name' collides with the
                            # reserved override key.
                            log.error(
                                'Cannot use \'name\' as a minion id in a cloud map as it '
                                'is a reserved word. Please change \'name\' to a different '
                                'minion id reference.'
                            )
                            return {}
                        entries[name] = overrides
                map_[profile] = entries
                continue
            if isinstance(mapped, dict):
                # Convert the dictionary mapping to a list of dictionaries
                # Foo:
                #   bar1:
                #     grains:
                #       foo: bar
                #   bar2:
                #     grains:
                #       foo: bar
                entries = {}
                for name, overrides in six.iteritems(mapped):
                    overrides.setdefault('name', name)
                    entries[name] = overrides
                map_[profile] = entries
                continue
            if isinstance(mapped, six.string_types):
                # If it's a single string entry, let's make iterable because of
                # the next step
                mapped = [mapped]
            map_[profile] = {}
            for name in mapped:
                map_[profile][name] = {'name': name}
        return map_
def _has_loop(self, dmap, seen=None, val=None):
if seen is None:
for values in six.itervalues(dmap['create']):
seen = []
try:
machines = values['requires']
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
else:
if val in seen:
return True
seen.append(val)
try:
machines = dmap['create'][val]['requires']
except KeyError:
machines = []
for machine in machines:
if self._has_loop(dmap, seen=list(seen), val=machine):
return True
return False
def _calcdep(self, dmap, machine, data, level):
try:
deplist = data['requires']
except KeyError:
return level
levels = []
for name in deplist:
try:
data = dmap['create'][name]
except KeyError:
try:
data = dmap['existing'][name]
except KeyError:
msg = 'Missing dependency in cloud map'
log.error(msg)
raise SaltCloudException(msg)
levels.append(self._calcdep(dmap, name, data, level))
level = max(levels) + 1
return level
def run_map(self, dmap):
'''
Execute the contents of the VM map
'''
if self._has_loop(dmap):
msg = 'Uh-oh, that cloud map has a dependency loop!'
log.error(msg)
raise SaltCloudException(msg)
# Go through the create list and calc dependencies
for key, val in six.iteritems(dmap['create']):
log.info('Calculating dependencies for %s', key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order %s for %s', level, key)
dmap['create'][key]['level'] = level
try:
existing_list = six.iteritems(dmap['existing'])
except KeyError:
existing_list = six.iteritems({})
for key, val in existing_list:
log.info('Calculating dependencies for %s', key)
level = 0
level = self._calcdep(dmap, key, val, level)
log.debug('Got execution order %s for %s', level, key)
dmap['existing'][key]['level'] = level
# Now sort the create list based on dependencies
create_list = sorted(six.iteritems(dmap['create']), key=lambda x: x[1]['level'])
full_map = dmap['create'].copy()
if 'existing' in dmap:
full_map.update(dmap['existing'])
possible_master_list = sorted(six.iteritems(full_map), key=lambda x: x[1]['level'])
output = {}
if self.opts['parallel']:
parallel_data = []
master_name = None
master_minion_name = None
master_host = None
master_finger = None
for name, profile in possible_master_list:
if profile.get('make_master', False) is True:
master_name = name
master_profile = profile
if master_name:
# If the master already exists, get the host
if master_name not in dmap['create']:
master_host = self.client.query()
for provider_part in master_profile['provider'].split(':'):
master_host = master_host[provider_part]
master_host = master_host[master_name][master_profile.get('ssh_interface', 'public_ips')]
if not master_host:
raise SaltCloudSystemExit(
'Could not get the hostname of master {}.'.format(master_name)
)
# Otherwise, deploy it as a new master
else:
master_minion_name = master_name
log.debug('Creating new master \'%s\'', master_name)
if salt.config.get_cloud_config_value(
'deploy',
master_profile,
self.opts
) is False:
raise SaltCloudSystemExit(
'Cannot proceed with \'make_master\' when salt deployment '
'is disabled(ex: --no-deploy).'
)
# Generate the master keys
log.debug('Generating master keys for \'%s\'', master_profile['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
master_profile,
self.opts
)
)
master_profile['master_pub'] = pub
master_profile['master_pem'] = priv
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_temp_pub = salt.utils.files.mkstemp()
with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
mtp.write(pub)
master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
os.unlink(master_temp_pub)
if master_profile.get('make_minion', True) is True:
master_profile.setdefault('minion', {})
if 'id' in master_profile['minion']:
master_minion_name = master_profile['minion']['id']
# Set this minion's master as local if the user has not set it
if 'master' not in master_profile['minion']:
master_profile['minion']['master'] = '127.0.0.1'
if master_finger is not None:
master_profile['master_finger'] = master_finger
# Generate the minion keys to pre-seed the master:
for name, profile in create_list:
make_minion = salt.config.get_cloud_config_value(
'make_minion', profile, self.opts, default=True
)
if make_minion is False:
continue
log.debug('Generating minion keys for \'%s\'', profile['name'])
priv, pub = salt.utils.cloud.gen_keys(
salt.config.get_cloud_config_value(
'keysize',
profile,
self.opts
)
)
profile['pub_key'] = pub
profile['priv_key'] = priv
# Store the minion's public key in order to be pre-seeded in
# the master
master_profile.setdefault('preseed_minion_keys', {})
master_profile['preseed_minion_keys'].update({name: pub})
local_master = False
if master_profile['minion'].get('local_master', False) and \
master_profile['minion'].get('master', None) is not None:
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
out = self.create(master_profile, local_master=local_master)
if not isinstance(out, dict):
log.debug('Master creation details is not a dictionary: %s', out)
elif 'Errors' in out:
raise SaltCloudSystemExit(
'An error occurred while creating the master, not '
'continuing: {0}'.format(out['Errors'])
)
deploy_kwargs = (
self.opts.get('show_deploy_args', False) is True and
# Get the needed data
out.get('deploy_kwargs', {}) or
# Strip the deploy_kwargs from the returned data since we don't
# want it shown in the console.
out.pop('deploy_kwargs', {})
)
master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
if master_host is None:
raise SaltCloudSystemExit(
'Host for new master {0} was not found, '
'aborting map'.format(
master_name
)
)
output[master_name] = out
else:
log.debug('No make_master found in map')
# Local master?
# Generate the fingerprint of the master pubkey in order to
# mitigate man-in-the-middle attacks
master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
if os.path.isfile(master_pub):
master_finger = salt.utils.crypt.pem_finger(master_pub, sum_type=self.opts['hash_type'])
opts = self.opts.copy()
if self.opts['parallel']:
# Force display_ssh_output to be False since the console will
# need to be reset afterwards
log.info(
'Since parallel deployment is in use, ssh console output '
'is disabled. All ssh output will be logged though'
)
opts['display_ssh_output'] = False
local_master = master_name is None
for name, profile in create_list:
if name in (master_name, master_minion_name):
# Already deployed, it's the master's minion
continue
if 'minion' in profile and profile['minion'].get('local_master', False) and \
profile['minion'].get('master', None) is not None:
# The minion is explicitly defining a master and it's
# explicitly saying it's the local one
local_master = True
if master_finger is not None and local_master is False:
profile['master_finger'] = master_finger
if master_host is not None:
profile.setdefault('minion', {})
profile['minion'].setdefault('master', master_host)
if self.opts['parallel']:
parallel_data.append({
'opts': opts,
'name': name,
'profile': profile,
'local_master': local_master
})
continue
# Not deploying in parallel
try:
output[name] = self.create(
profile, local_master=local_master
)
if self.opts.get('show_deploy_args', False) is False \
and 'deploy_kwargs' in output \
and isinstance(output[name], dict):
output[name].pop('deploy_kwargs', None)
except SaltCloudException as exc:
log.error(
'Failed to deploy \'%s\'. Error: %s',
name, exc, exc_info_on_loglevel=logging.DEBUG
)
output[name] = {'Error': str(exc)}
for name in dmap.get('destroy', ()):
output[name] = self.destroy(name)
if self.opts['parallel'] and parallel_data:
if 'pool_size' in self.opts:
pool_size = self.opts['pool_size']
else:
pool_size = len(parallel_data)
log.info('Cloud pool size: %s', pool_size)
output_multip = enter_mainloop(
_create_multiprocessing, parallel_data, pool_size=pool_size)
# We have deployed in parallel, now do start action in
# correct order based on dependencies.
if self.opts['start_action']:
actionlist = []
grp = -1
for key, val in groupby(six.itervalues(dmap['create']), lambda x: x['level']):
actionlist.append([])
grp += 1
for item in val:
actionlist[grp].append(item['name'])
out = {}
for group in actionlist:
log.info('Running %s on %s', self.opts['start_action'], ', '.join(group))
client = salt.client.get_local_client()
out.update(client.cmd(
','.join(group), self.opts['start_action'],
timeout=self.opts['timeout'] * 60, tgt_type='list'
))
for obj in output_multip:
next(six.itervalues(obj))['ret'] = out[next(six.iterkeys(obj))]
output.update(obj)
else:
for obj in output_multip:
output.update(obj)
return output
|
saltstack/salt
|
salt/cloud/__init__.py
|
Map.run_map
|
python
|
def run_map(self, dmap):
    '''
    Execute the contents of the VM map.

    ``dmap`` is the structure produced by ``Map.map_data`` with ``create``,
    optional ``existing`` and optional ``destroy`` entries.  The method:

    1. rejects maps with dependency loops,
    2. computes an execution ``level`` per node from ``requires`` entries,
    3. optionally deploys (or locates) a ``make_master`` node first and
       pre-seeds minion keys on it,
    4. creates the remaining nodes serially or in parallel, and
    5. destroys anything listed under ``destroy``.

    Returns a dict mapping node names to their creation/destruction results.
    '''
    if self._has_loop(dmap):
        msg = 'Uh-oh, that cloud map has a dependency loop!'
        log.error(msg)
        raise SaltCloudException(msg)
    # Go through the create list and calc dependencies
    for key, val in six.iteritems(dmap['create']):
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['create'][key]['level'] = level
    try:
        existing_list = six.iteritems(dmap['existing'])
    except KeyError:
        existing_list = six.iteritems({})
    for key, val in existing_list:
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['existing'][key]['level'] = level
    # Now sort the create list based on dependencies
    create_list = sorted(six.iteritems(dmap['create']), key=lambda x: x[1]['level'])
    full_map = dmap['create'].copy()
    if 'existing' in dmap:
        full_map.update(dmap['existing'])
    possible_master_list = sorted(six.iteritems(full_map), key=lambda x: x[1]['level'])
    output = {}
    if self.opts['parallel']:
        parallel_data = []
    master_name = None
    master_minion_name = None
    master_host = None
    master_finger = None
    # Last profile flagged make_master wins as the map's master.
    for name, profile in possible_master_list:
        if profile.get('make_master', False) is True:
            master_name = name
            master_profile = profile
    if master_name:
        # If the master already exists, get the host
        if master_name not in dmap['create']:
            master_host = self.client.query()
            for provider_part in master_profile['provider'].split(':'):
                master_host = master_host[provider_part]
            master_host = master_host[master_name][master_profile.get('ssh_interface', 'public_ips')]
            if not master_host:
                raise SaltCloudSystemExit(
                    'Could not get the hostname of master {}.'.format(master_name)
                )
        # Otherwise, deploy it as a new master
        else:
            master_minion_name = master_name
            log.debug('Creating new master \'%s\'', master_name)
            if salt.config.get_cloud_config_value(
                'deploy',
                master_profile,
                self.opts
            ) is False:
                raise SaltCloudSystemExit(
                    'Cannot proceed with \'make_master\' when salt deployment '
                    'is disabled(ex: --no-deploy).'
                )
            # Generate the master keys
            log.debug('Generating master keys for \'%s\'', master_profile['name'])
            priv, pub = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize',
                    master_profile,
                    self.opts
                )
            )
            master_profile['master_pub'] = pub
            master_profile['master_pem'] = priv
            # Generate the fingerprint of the master pubkey in order to
            # mitigate man-in-the-middle attacks
            master_temp_pub = salt.utils.files.mkstemp()
            with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
                mtp.write(pub)
            master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
            os.unlink(master_temp_pub)
            if master_profile.get('make_minion', True) is True:
                master_profile.setdefault('minion', {})
                if 'id' in master_profile['minion']:
                    master_minion_name = master_profile['minion']['id']
                # Set this minion's master as local if the user has not set it
                if 'master' not in master_profile['minion']:
                    master_profile['minion']['master'] = '127.0.0.1'
                    if master_finger is not None:
                        master_profile['master_finger'] = master_finger
            # Generate the minion keys to pre-seed the master:
            for name, profile in create_list:
                make_minion = salt.config.get_cloud_config_value(
                    'make_minion', profile, self.opts, default=True
                )
                if make_minion is False:
                    continue
                log.debug('Generating minion keys for \'%s\'', profile['name'])
                priv, pub = salt.utils.cloud.gen_keys(
                    salt.config.get_cloud_config_value(
                        'keysize',
                        profile,
                        self.opts
                    )
                )
                profile['pub_key'] = pub
                profile['priv_key'] = priv
                # Store the minion's public key in order to be pre-seeded in
                # the master
                master_profile.setdefault('preseed_minion_keys', {})
                master_profile['preseed_minion_keys'].update({name: pub})
            local_master = False
            if master_profile['minion'].get('local_master', False) and \
                    master_profile['minion'].get('master', None) is not None:
                # The minion is explicitly defining a master and it's
                # explicitly saying it's the local one
                local_master = True
            out = self.create(master_profile, local_master=local_master)
            if not isinstance(out, dict):
                log.debug('Master creation details is not a dictionary: %s', out)
            elif 'Errors' in out:
                raise SaltCloudSystemExit(
                    'An error occurred while creating the master, not '
                    'continuing: {0}'.format(out['Errors'])
                )
            deploy_kwargs = (
                self.opts.get('show_deploy_args', False) is True and
                # Get the needed data
                out.get('deploy_kwargs', {}) or
                # Strip the deploy_kwargs from the returned data since we don't
                # want it shown in the console.
                out.pop('deploy_kwargs', {})
            )
            master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
            if master_host is None:
                raise SaltCloudSystemExit(
                    'Host for new master {0} was not found, '
                    'aborting map'.format(
                        master_name
                    )
                )
            output[master_name] = out
    else:
        log.debug('No make_master found in map')
        # Local master?
        # Generate the fingerprint of the master pubkey in order to
        # mitigate man-in-the-middle attacks
        master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
        if os.path.isfile(master_pub):
            master_finger = salt.utils.crypt.pem_finger(master_pub, sum_type=self.opts['hash_type'])
    opts = self.opts.copy()
    if self.opts['parallel']:
        # Force display_ssh_output to be False since the console will
        # need to be reset afterwards
        log.info(
            'Since parallel deployment is in use, ssh console output '
            'is disabled. All ssh output will be logged though'
        )
        opts['display_ssh_output'] = False
    local_master = master_name is None
    for name, profile in create_list:
        if name in (master_name, master_minion_name):
            # Already deployed, it's the master's minion
            continue
        if 'minion' in profile and profile['minion'].get('local_master', False) and \
                profile['minion'].get('master', None) is not None:
            # The minion is explicitly defining a master and it's
            # explicitly saying it's the local one
            local_master = True
        if master_finger is not None and local_master is False:
            profile['master_finger'] = master_finger
        if master_host is not None:
            profile.setdefault('minion', {})
            profile['minion'].setdefault('master', master_host)
        if self.opts['parallel']:
            parallel_data.append({
                'opts': opts,
                'name': name,
                'profile': profile,
                'local_master': local_master
            })
            continue
        # Not deploying in parallel
        try:
            output[name] = self.create(
                profile, local_master=local_master
            )
            # NOTE(review): this checks 'deploy_kwargs' in *output* (keyed by
            # VM name) rather than in output[name] — looks like it should be
            # output[name]; the pop below uses a default so behavior is benign.
            if self.opts.get('show_deploy_args', False) is False \
                    and 'deploy_kwargs' in output \
                    and isinstance(output[name], dict):
                output[name].pop('deploy_kwargs', None)
        except SaltCloudException as exc:
            log.error(
                'Failed to deploy \'%s\'. Error: %s',
                name, exc, exc_info_on_loglevel=logging.DEBUG
            )
            output[name] = {'Error': str(exc)}
    for name in dmap.get('destroy', ()):
        output[name] = self.destroy(name)
    if self.opts['parallel'] and parallel_data:
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Cloud pool size: %s', pool_size)
        output_multip = enter_mainloop(
            _create_multiprocessing, parallel_data, pool_size=pool_size)
        # We have deployed in parallel, now do start action in
        # correct order based on dependencies.
        if self.opts['start_action']:
            actionlist = []
            grp = -1
            for key, val in groupby(six.itervalues(dmap['create']), lambda x: x['level']):
                actionlist.append([])
                grp += 1
                for item in val:
                    actionlist[grp].append(item['name'])
            out = {}
            for group in actionlist:
                log.info('Running %s on %s', self.opts['start_action'], ', '.join(group))
                client = salt.client.get_local_client()
                out.update(client.cmd(
                    ','.join(group), self.opts['start_action'],
                    timeout=self.opts['timeout'] * 60, tgt_type='list'
                ))
            for obj in output_multip:
                next(six.itervalues(obj))['ret'] = out[next(six.iterkeys(obj))]
                output.update(obj)
        else:
            for obj in output_multip:
                output.update(obj)
    return output
|
Execute the contents of the VM map
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/__init__.py#L2031-L2293
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def pem_finger(path=None, key=None, sum_type='sha256'):\n '''\n Pass in either a raw pem string, or the path on disk to the location of a\n pem file, and the type of cryptographic hash to use. The default is SHA256.\n The fingerprint of the pem will be returned.\n\n If neither a key nor a path are passed in, a blank string will be returned.\n '''\n if not key:\n if not os.path.isfile(path):\n return ''\n\n with salt.utils.files.fopen(path, 'rb') as fp_:\n key = b''.join([x for x in fp_.readlines() if x.strip()][1:-1])\n\n pre = getattr(hashlib, sum_type)(key).hexdigest()\n finger = ''\n for ind, _ in enumerate(pre):\n if ind % 2:\n # Is odd\n finger += '{0}:'.format(pre[ind])\n else:\n finger += pre[ind]\n return finger.rstrip(':')\n",
"def _has_loop(self, dmap, seen=None, val=None):\n if seen is None:\n for values in six.itervalues(dmap['create']):\n seen = []\n try:\n machines = values['requires']\n except KeyError:\n machines = []\n for machine in machines:\n if self._has_loop(dmap, seen=list(seen), val=machine):\n return True\n else:\n if val in seen:\n return True\n\n seen.append(val)\n try:\n machines = dmap['create'][val]['requires']\n except KeyError:\n machines = []\n\n for machine in machines:\n if self._has_loop(dmap, seen=list(seen), val=machine):\n return True\n return False\n"
] |
class Map(Cloud):
    '''
    Create a VM stateful map execution object.

    Reads a salt-cloud map (from a file, raw data, or pillar), normalizes it
    into ``{profile: {vm_name: overrides}}`` form, and computes what must be
    created, what already exists, and (for --hard maps) what must be destroyed.
    '''
    def __init__(self, opts):
        # The rendered map is parsed once at construction time.
        Cloud.__init__(self, opts)
        self.rendered_map = self.read()
    def interpolated_map(self, query='list_nodes', cached=False):
        '''
        Merge the rendered map with live provider query data, returning
        ``{alias: {driver: {vm_name: details-or-'Absent'}}}``.
        '''
        rendered_map = self.read().copy()
        interpolated_map = {}
        for profile, mapped_vms in six.iteritems(rendered_map):
            names = set(mapped_vms)
            if profile not in self.opts['profiles']:
                if 'Errors' not in interpolated_map:
                    interpolated_map['Errors'] = {}
                msg = (
                    'No provider for the mapped \'{0}\' profile was found. '
                    'Skipped VMS: {1}'.format(
                        profile, ', '.join(names)
                    )
                )
                log.info(msg)
                interpolated_map['Errors'][profile] = msg
                continue
            matching = self.get_running_by_names(names, query, cached)
            for alias, drivers in six.iteritems(matching):
                for driver, vms in six.iteritems(drivers):
                    for vm_name, vm_details in six.iteritems(vms):
                        if alias not in interpolated_map:
                            interpolated_map[alias] = {}
                        if driver not in interpolated_map[alias]:
                            interpolated_map[alias][driver] = {}
                        interpolated_map[alias][driver][vm_name] = vm_details
                        try:
                            names.remove(vm_name)
                        except KeyError:
                            # If it's not there, then our job is already done
                            pass
            if not names:
                continue
            # Anything left in `names` was not found running; mark it Absent.
            profile_details = self.opts['profiles'][profile]
            alias, driver = profile_details['provider'].split(':')
            for vm_name in names:
                if alias not in interpolated_map:
                    interpolated_map[alias] = {}
                if driver not in interpolated_map[alias]:
                    interpolated_map[alias][driver] = {}
                interpolated_map[alias][driver][vm_name] = 'Absent'
        return interpolated_map
    def delete_map(self, query=None):
        '''
        Return the interpolated map with 'Absent' VMs (and any resulting
        empty driver/alias entries) pruned out.
        '''
        query_map = self.interpolated_map(query=query)
        for alias, drivers in six.iteritems(query_map.copy()):
            for driver, vms in six.iteritems(drivers.copy()):
                for vm_name, vm_details in six.iteritems(vms.copy()):
                    if vm_details == 'Absent':
                        query_map[alias][driver].pop(vm_name)
                if not query_map[alias][driver]:
                    query_map[alias].pop(driver)
            if not query_map[alias]:
                query_map.pop(alias)
        return query_map
    def get_vmnames_by_action(self, action):
        '''
        Return the names of mapped VMs whose current state makes them
        eligible for the given global action (start/stop/reboot).
        '''
        query_map = self.interpolated_map("list_nodes")
        matching_states = {
            'start': ['stopped'],
            'stop': ['running', 'active'],
            'reboot': ['running', 'active'],
        }
        vm_names = []
        for alias, drivers in six.iteritems(query_map):
            for driver, vms in six.iteritems(drivers):
                for vm_name, vm_details in six.iteritems(vms):
                    # Only certain actions are supported in this case. Those actions are the
                    # "Global" salt-cloud actions defined in the "matching_states" dictionary above.
                    # If a more specific action is passed in, we shouldn't stack-trace - exit gracefully.
                    try:
                        state_action = matching_states[action]
                    except KeyError:
                        log.error(
                            'The use of \'%s\' as an action is not supported '
                            'in this context. Only \'start\', \'stop\', and '
                            '\'reboot\' are supported options.', action
                        )
                        raise SaltCloudException()
                    if vm_details != 'Absent' and vm_details['state'].lower() in state_action:
                        vm_names.append(vm_name)
        return vm_names
    def read(self):
        '''
        Read in the specified map and return the map structure.

        Sources, in priority order: the ``map`` option (a rendered file,
        local or ``salt://``), ``map_data`` (raw data), or ``map_pillar``
        (a map stored under ``cloud:maps`` in pillar).  The result is
        normalized to ``{profile: {vm_name: {'name': vm_name, ...}}}``.
        '''
        map_ = None
        if self.opts.get('map', None) is None:
            if self.opts.get('map_data', None) is None:
                if self.opts.get('map_pillar', None) is None:
                    pass
                elif self.opts.get('map_pillar') not in self.opts.get('maps'):
                    log.error(
                        'The specified map not found in pillar at '
                        '\'cloud:maps:%s\'', self.opts['map_pillar']
                    )
                    raise SaltCloudNotFound()
                else:
                    # 'map_pillar' is provided, try to use it
                    map_ = self.opts['maps'][self.opts.get('map_pillar')]
            else:
                # 'map_data' is provided, try to use it
                map_ = self.opts['map_data']
        else:
            # 'map' is provided, try to use it
            local_minion_opts = copy.deepcopy(self.opts)
            local_minion_opts['file_client'] = 'local'
            self.minion = salt.minion.MasterMinion(local_minion_opts)
            if not os.path.isfile(self.opts['map']):
                if not (self.opts['map']).startswith('salt://'):
                    log.error(
                        'The specified map file does not exist: \'%s\'',
                        self.opts['map']
                    )
                    raise SaltCloudNotFound()
            if (self.opts['map']).startswith('salt://'):
                cached_map = self.minion.functions['cp.cache_file'](self.opts['map'])
            else:
                cached_map = self.opts['map']
            try:
                renderer = self.opts.get('renderer', 'jinja|yaml')
                rend = salt.loader.render(self.opts, {})
                blacklist = self.opts.get('renderer_blacklist')
                whitelist = self.opts.get('renderer_whitelist')
                map_ = compile_template(
                    cached_map, rend, renderer, blacklist, whitelist
                )
            except Exception as exc:
                log.error(
                    'Rendering map %s failed, render error:\n%s',
                    self.opts['map'], exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                return {}
            if 'include' in map_:
                map_ = salt.config.include_config(
                    map_, self.opts['map'], verbose=False
                )
        if not map_:
            return {}
        # Create expected data format if needed
        for profile, mapped in six.iteritems(map_.copy()):
            if isinstance(mapped, (list, tuple)):
                entries = {}
                for mapping in mapped:
                    if isinstance(mapping, six.string_types):
                        # Foo:
                        #   - bar1
                        #   - bar2
                        mapping = {mapping: None}
                    for name, overrides in six.iteritems(mapping):
                        if overrides is None or isinstance(overrides, bool):
                            # Foo:
                            #   - bar1:
                            #   - bar2:
                            overrides = {}
                        try:
                            overrides.setdefault('name', name)
                        except AttributeError:
                            log.error(
                                'Cannot use \'name\' as a minion id in a cloud map as it '
                                'is a reserved word. Please change \'name\' to a different '
                                'minion id reference.'
                            )
                            return {}
                        entries[name] = overrides
                map_[profile] = entries
                continue
            if isinstance(mapped, dict):
                # Convert the dictionary mapping to a list of dictionaries
                # Foo:
                #  bar1:
                #    grains:
                #      foo: bar
                #  bar2:
                #    grains:
                #      foo: bar
                entries = {}
                for name, overrides in six.iteritems(mapped):
                    overrides.setdefault('name', name)
                    entries[name] = overrides
                map_[profile] = entries
                continue
            if isinstance(mapped, six.string_types):
                # If it's a single string entry, let's make iterable because of
                # the next step
                mapped = [mapped]
            map_[profile] = {}
            for name in mapped:
                map_[profile][name] = {'name': name}
        return map_
    def _has_loop(self, dmap, seen=None, val=None):
        '''
        Depth-first walk of the 'requires' graph; True if a cycle exists.
        '''
        if seen is None:
            for values in six.itervalues(dmap['create']):
                seen = []
                try:
                    machines = values['requires']
                except KeyError:
                    machines = []
                for machine in machines:
                    if self._has_loop(dmap, seen=list(seen), val=machine):
                        return True
        else:
            if val in seen:
                return True
            seen.append(val)
            try:
                machines = dmap['create'][val]['requires']
            except KeyError:
                machines = []
            for machine in machines:
                if self._has_loop(dmap, seen=list(seen), val=machine):
                    return True
        return False
    def _calcdep(self, dmap, machine, data, level):
        '''
        Recursively compute a machine's execution level: one more than the
        deepest level among its 'requires' dependencies.
        '''
        try:
            deplist = data['requires']
        except KeyError:
            return level
        levels = []
        for name in deplist:
            try:
                data = dmap['create'][name]
            except KeyError:
                try:
                    data = dmap['existing'][name]
                except KeyError:
                    msg = 'Missing dependency in cloud map'
                    log.error(msg)
                    raise SaltCloudException(msg)
            levels.append(self._calcdep(dmap, name, data, level))
        level = max(levels) + 1
        return level
    def map_data(self, cached=False):
        '''
        Create a data map of what to execute on
        '''
        ret = {'create': {}}
        pmap = self.map_providers_parallel(cached=cached)
        exist = set()
        defined = set()
        rendered_map = copy.deepcopy(self.rendered_map)
        for profile_name, nodes in six.iteritems(rendered_map):
            if profile_name not in self.opts['profiles']:
                msg = (
                    'The required profile, \'{0}\', defined in the map '
                    'does not exist. The defined nodes, {1}, will not '
                    'be created.'.format(
                        profile_name,
                        ', '.join('\'{0}\''.format(node) for node in nodes)
                    )
                )
                log.error(msg)
                if 'errors' not in ret:
                    ret['errors'] = {}
                ret['errors'][profile_name] = msg
                continue
            profile_data = self.opts['profiles'].get(profile_name)
            for nodename, overrides in six.iteritems(nodes):
                # Get associated provider data, in case something like size
                # or image is specified in the provider file. See issue #32510.
                if 'provider' in overrides and overrides['provider'] != profile_data['provider']:
                    alias, driver = overrides.get('provider').split(':')
                else:
                    alias, driver = profile_data.get('provider').split(':')
                provider_details = copy.deepcopy(self.opts['providers'][alias][driver])
                del provider_details['profiles']
                # Update the provider details information with profile data
                # Profile data and node overrides should override provider data, if defined.
                # This keeps map file data definitions consistent with -p usage.
                salt.utils.dictupdate.update(provider_details, profile_data)
                nodedata = copy.deepcopy(provider_details)
                # Update profile data with the map overrides
                for setting in ('grains', 'master', 'minion', 'volumes',
                                'requires'):
                    deprecated = 'map_{0}'.format(setting)
                    if deprecated in overrides:
                        log.warning(
                            'The use of \'%s\' on the \'%s\' mapping has '
                            'been deprecated. The preferred way now is to '
                            'just define \'%s\'. For now, salt-cloud will do '
                            'the proper thing and convert the deprecated '
                            'mapping into the preferred one.',
                            deprecated, nodename, setting
                        )
                        overrides[setting] = overrides.pop(deprecated)
                # merge minion grains from map file
                if 'minion' in overrides and \
                        'minion' in nodedata and \
                        'grains' in overrides['minion'] and \
                        'grains' in nodedata['minion']:
                    nodedata['minion']['grains'].update(
                        overrides['minion']['grains']
                    )
                    del overrides['minion']['grains']
                    # remove minion key if now is empty dict
                    if not overrides['minion']:
                        del overrides['minion']
                nodedata = salt.utils.dictupdate.update(nodedata, overrides)
                # Add the computed information to the return data
                ret['create'][nodename] = nodedata
                # Add the node name to the defined set
                alias, driver = nodedata['provider'].split(':')
                defined.add((alias, driver, nodename))
        def get_matching_by_name(name):
            # Map driver -> state for every running VM sharing this name.
            matches = {}
            for alias, drivers in six.iteritems(pmap):
                for driver, vms in six.iteritems(drivers):
                    for vm_name, details in six.iteritems(vms):
                        if vm_name == name and driver not in matches:
                            matches[driver] = details['state']
            return matches
        for alias, drivers in six.iteritems(pmap):
            for driver, vms in six.iteritems(drivers):
                for name, details in six.iteritems(vms):
                    exist.add((alias, driver, name))
                    if name not in ret['create']:
                        continue
                    # The machine is set to be created. Does it already exist?
                    matching = get_matching_by_name(name)
                    if not matching:
                        continue
                    # A machine by the same name exists
                    for item in matching:
                        if name not in ret['create']:
                            # Machine already removed
                            break
                        log.warning("'%s' already exists, removing from "
                                    'the create map.', name)
                        if 'existing' not in ret:
                            ret['existing'] = {}
                        ret['existing'][name] = ret['create'].pop(name)
        if 'hard' in self.opts and self.opts['hard']:
            if self.opts['enable_hard_maps'] is False:
                raise SaltCloudSystemExit(
                    'The --hard map can be extremely dangerous to use, '
                    'and therefore must explicitly be enabled in the main '
                    'configuration file, by setting \'enable_hard_maps\' '
                    'to True'
                )
            # Hard maps are enabled, Look for the items to delete.
            ret['destroy'] = exist.difference(defined)
        return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
_get_user_info
|
python
|
def _get_user_info(user=None):
    '''
    Wrapper for the ``user.info`` Salt function.

    When *user* is not given, default to the user the Salt process is
    running as.  The special ``salt`` user falls back to the running
    user if it does not exist; any other unknown user raises
    ``SaltInvocationError``.
    '''
    if not user:
        # Default to the user Salt is running as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user == 'salt':
        # Special case with the `salt` user: if it doesn't exist then
        # fall back to the user Salt is running as.
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
|
Wrapper for user.info Salt function
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L104-L122
|
[
"def _get_user_info(user=None):\n '''\n Wrapper for user.info Salt function\n '''\n if not user:\n # Get user Salt runnining as\n user = __salt__['config.option']('user')\n\n userinfo = __salt__['user.info'](user)\n\n if not userinfo:\n if user == 'salt':\n # Special case with `salt` user:\n # if it doesn't exist then fall back to user Salt running as\n userinfo = _get_user_info()\n else:\n raise SaltInvocationError('User {0} does not exist'.format(user))\n\n return userinfo\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Single-letter trust codes (as found in key records) -> display labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Trust-level names -> numeric trust codes.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT: numeric trust codes -> display labels.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Numeric trust levels reported on verification results -> display labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when python-gnupg >= 1.3.1 is installed (that release changed the
# constructor keyword and dropped some operations — see usages below).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Locate the ``gpg`` executable on the PATH; None if it is missing.
    '''
    binary = salt.utils.path.which('gpg')
    return binary
def __virtual__():
    '''
    Load this module only when both the ``gpg`` binary and the
    python-gnupg bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_gnupghome(user):
    '''
    Resolve the default GnuPG home directory for *user*.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; everyone
    else gets ``.gnupg`` under their home directory.
    '''
    if user == 'salt':
        config_dir = __salt__['config.get']('config_dir')
        return os.path.join(config_dir, 'gpgkeys')
    home = _get_user_info(user)['home']
    return os.path.join(home, '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily chown the target user's GnuPG home to the user
    running Salt, call the wrapped function, then restore ownership.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (double-underscore keys injected by Salt)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Hand ownership of the keyring back to the target user
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` object bound to the appropriate keyring.
    '''
    home = gnupghome
    if not home:
        home = _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the constructor keyword to `homedir`
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key records from the user's keyring (secret keys when
    *secret* is True).
    '''
    keyring = _create_gpg(user, gnupghome)
    return keyring.list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver (or the bindings' default when *keyserver* is falsy)
    for keys matching *text*.
    '''
    gpg = _create_gpg(user)
    query = (text, keyserver) if keyserver else (text,)
    return gpg.search_keys(*query)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    results = []
    for record in _search_keys(text, keyserver, user):
        entry = {'keyid': record['keyid'],
                 'uids': record['uids']}
        # Epoch timestamps are rendered as ISO dates for display.
        if record.get('expires', None):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['expires'])))
        if record.get('date', None):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['date'])))
        if record.get('length', None):
            entry['keyLength'] = record['length']
        results.append(entry)
    return results
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    results = []
    for record in _list_keys(user, gnupghome):
        entry = {'keyid': record['keyid'],
                 'fingerprint': record['fingerprint'],
                 'uids': record['uids']}
        # Epoch timestamps are rendered as ISO dates for display.
        if record.get('expires', None):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['expires'])))
        if record.get('date', None):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['date'])))
        if record.get('length', None):
            entry['keyLength'] = record['length']
        # Translate single-letter trust codes to readable labels.
        if record.get('ownertrust', None):
            entry['ownerTrust'] = LETTER_TRUST_DICT[record['ownertrust']]
        if record.get('trust', None):
            entry['trust'] = LETTER_TRUST_DICT[record['trust']]
        results.append(entry)
    return results
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    results = []
    for record in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': record['keyid'],
                 'fingerprint': record['fingerprint'],
                 'uids': record['uids']}
        # Epoch timestamps are rendered as ISO dates for display.
        if record.get('expires', None):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['expires'])))
        if record.get('date', None):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(record['date'])))
        if record.get('length', None):
            entry['keyLength'] = record['length']
        # Translate single-letter trust codes to readable labels.
        if record.get('ownertrust', None):
            entry['ownerTrust'] = LETTER_TRUST_DICT[record['ownertrust']]
        if record.get('trust', None):
            entry['trust'] = LETTER_TRUST_DICT[record['trust']]
        results.append(entry)
    return results
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    # Result skeleton returned to the caller
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory gen_key_input parameters; optional ones are added below
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is never passed on the CLI; it comes from pillar
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # A fingerprint on the result object signals a successful generation
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the
        public key.  Secret keys must be deleted before deleting any
        corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # Exactly one of keyid/fingerprint must be given.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret

    gpg = _create_gpg(user, gnupghome)
    # Fixed: the lookups now honor ``gnupghome``.  Previously only the
    # default keyring for ``user`` was searched even though the deletion
    # itself operated on the keyring at ``gnupghome``.
    key = get_key(keyid, fingerprint, user, gnupghome)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user, gnupghome)
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)

        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    for _key in _list_keys(user, gnupghome):
        # Match on fingerprint, the full keyid, or the short keyid
        # (the tail of the long keyid).
        if not (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            continue
        tmp['keyid'] = _key['keyid']
        tmp['fingerprint'] = _key['fingerprint']
        tmp['uids'] = _key['uids']
        # Epoch timestamps are rendered as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    if not tmp:
        return False
    return tmp
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    for _key in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, the full keyid, or the short keyid
        # (the tail of the long keyid).
        if not (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            continue
        tmp['keyid'] = _key['keyid']
        tmp['fingerprint'] = _key['fingerprint']
        tmp['uids'] = _key['uids']
        # Epoch timestamps are rendered as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    if not tmp:
        return False
    return tmp
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            # Read the file in one call.  The previous
            # readlines()/''.join() combination broke on Python 3, where
            # a binary-mode read yields bytes that cannot be joined with
            # a text separator.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # python-gnupg >= 1.3.1 exposes the import summary as a ``counts``
    # dict; older releases expose individual attributes.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported.  Can be specified as a
        comma separated string or a list.  Anything which GnuPG itself
        accepts to identify a key - for example, the key ID or the
        fingerprint - could be used.

    secret
        Export the secret key identified by the ``keyids`` information
        passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept a comma separated string as well as a list of key ids.
    if isinstance(keyids, six.string_types):
        key_list = keyids.split(',')
    else:
        key_list = keyids
    return _create_gpg(user, gnupghome).export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to the keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver.  Can be specified as a
        comma separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')

    recv_data = gpg.recv_keys(keyserver, *keys)
    for result in recv_data.results:
        # python-gnupg reports 'ok' per key ('1' new, '0' pre-existing)
        # and 'problem' for failures.
        if 'ok' in result:
            status = result['ok']
            if status == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif status == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive; exactly one is required.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # gpg --import-ownertrust reads "FINGERPRINT:LEVEL" records on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The special 'salt' keychain lives under the Salt config dir and
        # the command is run as root against that homedir.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # NOTE(review): this assumes gpg's ownertrust output contains
            # exactly one digit per trust value (old value, new value) --
            # confirm against the gpg version in use.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg >= 1.3.1 renamed the ``keyid`` keyword to ``default_key``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Fixed: this branch previously called gpg.sign(text, ...)
                # with ``text`` being None here, so signing a file with
                # python-gnupg >= 1.3.1 never signed the file contents.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # ``signed_data.data`` is bytes; write in binary mode so the
            # output file is written correctly on Python 3 as well.
            with salt.utils.files.flopen(output, 'wb') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' signature='/path/to/important.file.sig'
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    # Fixed: ``gnupghome`` was accepted but silently ignored before.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # ``log.warn`` is a deprecated alias; use ``log.warning``.
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): unlike create_key/sign, this treats the pillar
        # value as a dict keyed by 'gpg_passphrase' -- confirm the
        # expected pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            # NOTE(review): the ``sign`` argument is not forwarded on this
            # code path -- confirm whether signing is intended here.
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare: return only the encrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key.  Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string
        without the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    gpg_passphrase = None
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if not result.ok:
        log.error(result.stderr)
        if bare:
            return False
        return {'res': False,
                'comment': '{0}.\nPlease check the salt-minion log.'.format(result.status)}

    if bare:
        return result.data
    if output:
        comment = 'Decrypted data has been written to {0}'.format(output)
    else:
        comment = result.data
    return {'res': True, 'comment': comment}
|
saltstack/salt
|
salt/modules/gpg.py
|
_get_user_gnupghome
|
python
|
def _get_user_gnupghome(user):
'''
Return default GnuPG home directory path for a user
'''
if user == 'salt':
gnupghome = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
else:
gnupghome = os.path.join(_get_user_info(user)['home'], '.gnupg')
return gnupghome
|
Return default GnuPG home directory path for a user
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L125-L134
|
[
"def _get_user_info(user=None):\n '''\n Wrapper for user.info Salt function\n '''\n if not user:\n # Get user Salt runnining as\n user = __salt__['config.option']('user')\n\n userinfo = __salt__['user.info'](user)\n\n if not userinfo:\n if user == 'salt':\n # Special case with `salt` user:\n # if it doesn't exist then fall back to user Salt running as\n userinfo = _get_user_info()\n else:\n raise SaltInvocationError('User {0} does not exist'.format(user))\n\n return userinfo\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG single-letter trust codes (as emitted in key listings) to
# human readable descriptions.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust level names accepted by trust_key() to the numeric values
# understood by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Reverse mapping of NUM_TRUST_DICT, used when reporting ownertrust changes.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Numeric trust levels found on python-gnupg verify results.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when python-gnupg >= 1.3.1 is installed; that release changed
# several keyword arguments and result attributes, so callers branch on it.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    # Bindings missing; __virtual__() will refuse to load the module.
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary, or None when it is not
    installed.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Load the module only when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Return ``user.info`` data for ``user``, defaulting to the user the
    Salt process is running as.
    '''
    if not user:
        # Default to the user the Salt process is running as.
        user = __salt__['config.option']('user')

    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo

    if user == 'salt':
        # Special case with the `salt` user: if it doesn't exist then
        # fall back to the user Salt is running as.
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
def _restore_ownership(func):
    '''
    Decorator: hand ownership of the GnuPG home directory to the user the
    Salt process runs as for the duration of the wrapped call, then give
    it back to the target user afterwards.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        # The wrapped gpg functions all accept these keyword arguments.
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (double-underscore keys Salt injects).
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Restore ownership of the keyring to the target user.
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` object for the given user / home directory.
    '''
    if not gnupghome:
        gnupghome = _get_user_gnupghome(user)
    # python-gnupg 1.3.1 renamed the keyword argument from ``gnupghome``
    # to ``homedir``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=gnupghome)
    return gnupg.GPG(gnupghome=gnupghome)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key listing from python-gnupg.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text`` through python-gnupg.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or
        fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to
        pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 dropped keyserver searching.
        # Fixed grammar of the error message ("not support" -> "not supported").
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        # Render epoch timestamps as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    _keys = []
    for _key in _list_keys(user, gnupghome):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        # Render epoch timestamps as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    _keys = []
    for _key in _list_keys(user, gnupghome, secret=True):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        # Render epoch timestamps as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key pair in the GPG keychain.

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        This is difficult to provide over a remote connection and on
        virtual machines; consider running an entropy source such as the
        `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.  Key creation can take a while, so increasing the timeout
        (e.g. ``-t 15``) may be necessary.

    key_type
        The type of the primary key to generate.  It must be capable of
        signing: 'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        An ISO date, a number of days/weeks/months/years, an epoch value,
        or 0 for a non-expiring key.

    use_passphrase
        Whether to protect the key with a passphrase.  The passphrase is
        received from Pillar (``gpg_passphrase``).

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)

    create_params = {
        'key_type': key_type,
        'key_length': key_length,
        'name_real': name_real,
        'name_comment': name_comment,
    }
    # Optional generation parameters are forwarded only when set.
    for param, value in (('name_email', name_email),
                         ('subkey_type', subkey_type),
                         ('subkey_length', subkey_length),
                         ('expire_date', expire_date)):
        if value:
            create_params[param] = value

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        create_params['passphrase'] = gpg_passphrase

    key = gpg.gen_key(gpg.gen_key_input(**create_params))
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain
    keyid
        The keyid of the key to be deleted.
    fingerprint
        The fingerprint of the key to be deleted.
    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive; exactly one is required.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    # Pass gnupghome through so the lookup inspects the same keyring the
    # deletion below will operate on (previously it consulted the default
    # keyring even when a custom gnupghome was given).
    key = get_key(keyid, fingerprint, user, gnupghome)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user, gnupghome)
        if skey and not delete_secret:
            # GnuPG refuses to delete a public key while its secret key exists.
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    matched = {}
    for candidate in _list_keys(user, gnupghome):
        # Match on the fingerprint, the full keyid, or the short (last 8
        # characters) keyid.
        if (candidate['fingerprint'] == fingerprint or
                candidate['keyid'] == keyid or
                candidate['keyid'][8:] == keyid):
            matched['keyid'] = candidate['keyid']
            matched['fingerprint'] = candidate['fingerprint']
            matched['uids'] = candidate['uids']
            if candidate.get('expires'):
                matched['expires'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(candidate['expires'])))
            if candidate.get('date'):
                matched['created'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(candidate['date'])))
            if candidate.get('length'):
                matched['keyLength'] = candidate['length']
            if candidate.get('ownertrust'):
                matched['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
            if candidate.get('trust'):
                matched['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # False (not an empty dict) signals "no such key" to callers.
    return matched if matched else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    matched = {}
    for candidate in _list_keys(user, gnupghome, secret=True):
        # Match on the fingerprint, the full keyid, or the short (last 8
        # characters) keyid.
        if (candidate['fingerprint'] == fingerprint or
                candidate['keyid'] == keyid or
                candidate['keyid'][8:] == keyid):
            matched['keyid'] = candidate['keyid']
            matched['fingerprint'] = candidate['fingerprint']
            matched['uids'] = candidate['uids']
            if candidate.get('expires'):
                matched['expires'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(candidate['expires'])))
            if candidate.get('date'):
                matched['created'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(candidate['date'])))
            if candidate.get('length'):
                matched['keyLength'] = candidate['length']
            if candidate.get('ownertrust'):
                matched['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
            if candidate.get('trust'):
                matched['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # False (not an empty dict) signals "no such key" to callers.
    return matched if matched else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file
    text
        The text containing to import.
    filename
        The filename containing the key to import.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                text = ''.join(_fp.readlines())
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # python-gnupg >= 1.3.1 exposes the result tallies as a ``counts`` dict;
    # earlier releases expose them as attributes. Normalize, then report.
    if GPG_1_3_1:
        counts = imported_data.counts
        imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        not_imported = counts.get('not_imported')
        total = counts.get('count')
    else:
        imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        not_imported = imported_data.not_imported
        total = imported_data.count
    if imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif not_imported or not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain
    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.
    secret
        Export the secret key identified by the ``keyids`` information passed.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    gpg = _create_gpg(user, gnupghome)
    # Accept either a comma-separated string or an actual list of key ids.
    key_ids = keyids.split(',') if isinstance(keyids, six.string_types) else keyids
    return gpg.export_keys(key_ids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    # Accept either a comma-separated string or an actual list of key ids.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # python-gnupg reports one result per requested key: ok == '1' means
    # newly imported, ok == '0' means it was already present.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append(
                    'Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append(
                    'Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain
    keyid
        The keyid of the key to set the trust level for.
    fingerprint
        The fingerprint of the key to set the trust level for.
    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive; one of them is required.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint, since the ownertrust
            # import below is keyed on fingerprints.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    # NOTE(review): this early return hands back a plain string rather than
    # the usual res/message dict -- callers should be aware of the mismatch.
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # ``gpg --import-ownertrust`` reads "FINGERPRINT:LEVEL\n" records on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The 'salt' pseudo-user keeps its keyring under the Salt config dir
        # and the command must run as root to access it.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports trust changes on stderr with the numeric levels,
            # e.g. a change from level 3 to level 5; pull out the digits.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): assumes stderr contains at least one digit
                # here; an empty _match would raise IndexError -- confirm
                # against actual gpg output.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            # No stderr output: message ends up as the empty string.
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    keyid
        The keyid of the key to sign with, defaults to
        the first key in the secret keyring.
    text
        The text to sign.
    filename
        The filename to sign.
    output
        The filename where the signed file will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg renamed the ``keyid`` keyword to ``default_key`` in 1.3.1.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # Sign the open file stream. The 1.3.1+ branch previously called
            # ``gpg.sign(text, ...)`` here, signing ``text`` (None at this
            # point) instead of the file contents.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file
    text
        The text to verify.
    filename
        The filename to verify.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    signature
        Specify the filename of a detached signature.
        .. versionadded:: 2018.3.0
    trustmodel
        Explicitly define the used trust model. One of:
        - pgp
        - classic
        - tofu
        - tofu+pgp
        - direct
        - always
        - auto
        .. versionadded:: fluorine
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # ``Logger.warn`` is a deprecated alias; use ``warning``.
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    # trust_level is None when the signature did not verify at all.
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints for those recipient whom the data is being encrypted for.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes into the pillar value, i.e. it expects
        # pillar key ``gpg_passphrase`` to hold a dict with a nested
        # ``gpg_passphrase`` entry -- unlike create_key/sign, which use the
        # pillar value directly. Confirm which shape is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            # NOTE(review): the ``sign`` argument is not forwarded on this
            # branch, unlike the encrypt_file calls below -- verify whether
            # that is intentional.
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare mode: hand back just the encrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes into the pillar value, i.e. it expects
        # pillar key ``gpg_passphrase`` to hold a dict with a nested
        # ``gpg_passphrase`` entry -- unlike create_key/sign, which use the
        # pillar value directly. Confirm which shape is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare mode: hand back just the decrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
_create_gpg
|
python
|
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` handle for the given user's keyring.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the keyword from gnupghome to homedir.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
|
Create the GPG object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L176-L188
|
[
"def _get_user_gnupghome(user):\n '''\n Return default GnuPG home directory path for a user\n '''\n if user == 'salt':\n gnupghome = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')\n else:\n gnupghome = os.path.join(_get_user_info(user)['home'], '.gnupg')\n\n return gnupghome\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Single-letter validity codes from GnuPG key listings -> readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# trust_key() argument names -> numeric levels fed to --import-ownertrust.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Reverse of NUM_TRUST_DICT, used when reporting trust changes.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Numeric trust levels returned by signature verification -> labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# python-gnupg >= 1.3.1 changed several APIs (homedir kwarg, import counts,
# default_key for signing); GPG_1_3_1 flags which variant is installed.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Returns the path to the gpg binary
    '''
    # Resolve via PATH; returns None when gpg is not installed, which
    # __virtual__ uses to refuse loading the module.
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Load only when both the gpg binary and the python-gnupg bindings
    are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_info(user=None):
    '''
    Look up account information via the ``user.info`` execution function.
    '''
    if not user:
        # Default to the account the Salt process is running as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if not userinfo:
        if user == 'salt':
            # The dedicated 'salt' account may not exist; fall back to the
            # account Salt itself runs under.
            return _get_user_info()
        raise SaltInvocationError('User {0} does not exist'.format(user))
    return userinfo
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory for ``user``.
    '''
    # The 'salt' pseudo-user keeps its keyring under the Salt config dir.
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    # Decorator: temporarily hand the GnuPG home directory to the account
    # running the Salt process, invoke ``func``, then give ownership back
    # to the target user so their keyring stays usable.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Hand ownership back to the target user and their primary group.
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key records python-gnupg reports for the selected
    keyring (secret keys when ``secret`` is True).
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text`` via python-gnupg.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    # Fall back to python-gnupg's default keyserver.
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver
    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    # python-gnupg >= 1.3.1 dropped keyserver searching.
    if GPG_1_3_1:
        # Fixed grammar in the user-facing error ("not support" -> "not supported").
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        # Epoch timestamps are rendered as ISO dates for readability.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_keys
    '''
    keys = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch timestamps are rendered as ISO dates for readability.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_secret_keys
    '''
    keys = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch timestamps are rendered as ISO dates for readability.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key pair in the GPG keychain.
    .. note::
        Key generation needs a large amount of entropy; it can be slow,
        especially on virtual machines (consider the ``rng-tools``
        package), so a larger timeout (e.g. ``-t 15``) may be required.
    key_type
        Type of the signing-capable primary key: 'RSA' or 'DSA'.
    key_length
        Bit length of the primary key.
    name_real
        Real name for the user identity represented by the key.
    name_comment
        Comment attached to the user id.
    name_email
        Email address for the user.
    subkey_type
        Type of the secondary key to generate.
    subkey_length
        Bit length of the secondary key.
    expire_date
        Expiration for primary and secondary keys: an ISO date, a number
        of days/weeks/months/years, an epoch value, or 0 for non-expiring.
    use_passphrase
        Protect the key with a passphrase taken from the
        ``gpg_passphrase`` Pillar value.
    user
        Which user's keychain to access, defaults to user Salt is running
        as. Passing ``salt`` selects ``/etc/salt/gpgkeys``.
    gnupghome
        Location where the GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    # Only forward the optional parameters the caller actually supplied.
    for param, value in (('name_email', name_email),
                         ('subkey_type', subkey_type),
                         ('subkey_length', subkey_length),
                         ('expire_date', expire_date)):
        if value:
            create_params[param] = value
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        create_params['passphrase'] = gpg_passphrase
    # gen_key_input renders the batch-mode parameter file GnuPG expects.
    key = gpg.gen_key(gpg.gen_key_input(**create_params))
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the
        public key.  Secret keys must be deleted before deleting any
        corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }

    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret

    gpg = _create_gpg(user, gnupghome)
    # Look the key up in the same keyring we are deleting from; previously
    # the gnupghome argument was dropped here, so keys living in a
    # non-default keyring were never found.
    key = get_key(keyid, fingerprint, user, gnupghome)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user, gnupghome)
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret

        if skey and delete_secret:
            # GnuPG refuses to remove a public key while its secret key
            # exists, so the secret key has to go first.
            if six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
                ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
            else:
                # Previously a failed secret-key deletion fell through and
                # the subsequent public-key deletion failed silently.
                ret['res'] = False
                ret['message'] = 'Failed to delete secret key for {0}'.format(fingerprint)
                return ret

        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
            ret['res'] = True
        else:
            ret['res'] = False
            ret['message'] += 'Failed to delete public key for {0}'.format(fingerprint)
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    Returns a dict describing the matching key, or ``False`` when no key
    matches.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    tmp = {}
    for _key in _list_keys(user, gnupghome):
        # A long key ID is 16 hex chars whose last 8 chars form the short
        # ID, so accept either form in addition to the fingerprint.
        if (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']

            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)

            # 'expires' and 'date' are epoch-seconds strings from gnupg;
            # render them as YYYY-MM-DD in local time.
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # NOTE(review): when several keys match, the loop keeps going and the
    # last match wins -- confirm this is intended.
    if not tmp:
        return False
    else:
        return tmp
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    Returns a dict describing the matching secret key, or ``False`` when no
    key matches.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    tmp = {}
    # secret=True restricts the listing to the secret keyring.
    for _key in _list_keys(user, gnupghome, secret=True):
        # Accept fingerprint, long key ID, or short (last 8 chars) key ID.
        if (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']

            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)

            # Epoch-seconds strings rendered as YYYY-MM-DD in local time.
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    if not tmp:
        return False
    else:
        return tmp
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                # Read the raw key material in one call.  The file is opened
                # in binary mode, so the previous readlines() + ''.join()
                # approach joined bytes with a str separator and raised
                # TypeError on Python 3; import_keys accepts bytes directly.
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # The two python-gnupg variants expose the import summary differently:
    # the >= 1.3.1 API provides a 'counts' dict, older releases expose
    # attributes directly on the result object.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    keyring = _create_gpg(user, gnupghome)
    requested = keyids
    # Normalize a comma separated string into the list form gnupg expects.
    if isinstance(requested, six.string_types):
        requested = requested.split(',')
    return keyring.export_keys(requested, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a
        comma separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }

    gpg = _create_gpg(user, gnupghome)

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    # Accept either a list or a comma separated string of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')

    recv_data = gpg.recv_keys(keyserver, *keys)
    # Each per-key result carries either an 'ok' status code ('1' means the
    # key was added, '0' means it was already present) or a 'problem' entry.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'

        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'

        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # Feed the "<fingerprint>:<numeric trust>" record to gpg on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user

    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new numeric trust values on stderr; pick
            # the digits out of the message.  Two digits means the trust
            # changed, one digit means it was newly set.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            elif len(_match) == 1:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
            else:
                # Unparseable output; previously this path indexed an empty
                # match list and raised IndexError.  Pass it through instead.
                ret['message'] = res['stderr']
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to the first key in the
        secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True

    '''
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg >= 1.3.1 renamed the ``keyid`` keyword to ``default_key``.
    gnupg_version = _LooseVersion(gnupg.__version__)

    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Sign the open file object.  Previously this branch called
                # gpg.sign(text, ...) where ``text`` is None, so signing a
                # file with the newer bindings never signed the file.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'

        salt '*' gpg.verify filename='/path/to/important.file'

        salt '*' gpg.verify filename='/path/to/important.file' signature='/path/to/important.file.sig'

        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    # Pass gnupghome through; it was previously accepted but silently
    # ignored, so verification always ran against the default keyring.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias for log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        if isinstance(gpg_passphrase, dict):
            # Historically this function required a nested
            # ``gpg_passphrase: {gpg_passphrase: ...}`` pillar layout and
            # crashed with TypeError on the plain-string layout used by
            # sign()/create_key(); accept both layouts.
            gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        # NOTE(review): ``sign`` is not forwarded for text input or with
        # the >= 1.3.1 bindings -- confirm whether signing should apply.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        if isinstance(gpg_passphrase, dict):
            # Historically this function required a nested
            # ``gpg_passphrase: {gpg_passphrase: ...}`` pillar layout and
            # crashed with TypeError on the plain-string layout used by
            # sign()/create_key(); accept both layouts.
            gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
_list_keys
|
python
|
def _list_keys(user=None, gnupghome=None, secret=False):
'''
Helper function for Listing keys
'''
gpg = _create_gpg(user, gnupghome)
_keys = gpg.list_keys(secret)
return _keys
|
Helper function for Listing keys
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L191-L197
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = 'gpg'

# Map GnuPG's single-letter validity/trust codes (as reported by
# python-gnupg key listings) to human readable descriptions.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}

# Map the trust level names accepted by trust_key() to the numeric
# ownertrust values consumed by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}

# Reverse of NUM_TRUST_DICT, used when reporting ownertrust changes.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}

# Trust levels attached to python-gnupg verification results.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}

# True when the installed python-gnupg exposes the newer >= 1.3.1 API,
# which uses different constructor and keyword arguments than older
# releases (see _create_gpg / sign / import_key for the branches).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary, or None when it cannot be
    found on the PATH.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Load the module only when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                       'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
                   'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Wrapper for the user.info Salt function.

    When ``user`` is None, looks up the user the Salt process is running
    as.  Raises SaltInvocationError for a nonexistent user, except for the
    special ``salt`` user which falls back to the running user.
    '''
    if not user:
        # Get the user Salt is running as
        user = __salt__['config.option']('user')

    userinfo = __salt__['user.info'](user)

    if not userinfo:
        if user == 'salt':
            # Special case with `salt` user:
            # if it doesn't exist then fall back to user Salt running as
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))

    return userinfo
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory path for a user.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys`` rather than
    a ``~/.gnupg`` directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
# Decorator for GPG functions that write to the keyring: temporarily takes
# ownership of the target user's GnuPG home so the (possibly different)
# Salt process user can operate on it, then hands ownership back.
def _restore_ownership(func):
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')

        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)

        userinfo = _get_user_info(user)
        run_user = _get_user_info()

        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)

        # Filter special kwargs (double-underscore entries injected by the
        # Salt loader) before calling the wrapped function.
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]

        ret = func(*args, **kwargs)

        # Hand ownership of the GnuPG home back to the target user.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)

        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Instantiate the python-gnupg GPG object for the given user/home dir.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # The >= 1.3.1 bindings renamed the constructor keyword.
    keyword = 'homedir' if GPG_1_3_1 else 'gnupghome'
    return gnupg.GPG(**{keyword: home})
def _search_keys(text, keyserver, user=None):
    '''
    Helper for querying a keyserver for keys matching ``text``.
    '''
    gpg = _create_gpg(user)
    if not keyserver:
        return gpg.search_keys(text)
    return gpg.search_keys(text, keyserver)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    # The >= 1.3.1 python-gnupg bindings do not provide keyserver search.
    if GPG_1_3_1:
        # Error-message grammar fixed ("is not support" -> "is not supported")
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)

        # Epoch-seconds strings rendered as YYYY-MM-DD in local time.
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List public keys in the GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    formatted = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}

        # Epoch-seconds strings become YYYY-MM-DD dates in local time.
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        formatted.append(entry)
    return formatted
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in the GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    formatted = []
    # secret=True restricts the listing to the secret keyring.
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}

        # Epoch-seconds strings become YYYY-MM-DD dates in local time.
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        formatted.append(entry)
    return formatted
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine.  Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }

    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }

    gpg = _create_gpg(user, gnupghome)

    # Optional key attributes are only forwarded to GnuPG when supplied.
    if name_email:
        create_params['name_email'] = name_email

    if subkey_type:
        create_params['subkey_type'] = subkey_type

    if subkey_length:
        create_params['subkey_length'] = subkey_length

    if expire_date:
        create_params['expire_date'] = expire_date

    if use_passphrase:
        # The passphrase is never accepted on the CLI; it must come from
        # the 'gpg_passphrase' pillar key.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase

    # gen_key blocks until GnuPG has gathered enough entropy for the key.
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)

    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The keyid of the key to be deleted.
    fingerprint
        The fingerprint of the key to be deleted.
    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive selectors; exactly one
    # must be provided.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        # Use the canonical fingerprint from the keyring, regardless of
        # which selector the caller passed.
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        if skey and not delete_secret:
            # GnuPG refuses to delete a public key while its secret key
            # exists, so bail out early with an explanation.
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # NOTE(review): if the secret-key deletion above does NOT return
        # 'ok', execution still falls through to the public-key deletion
        # below, which GnuPG will then refuse -- confirm this is intended.
        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # Match on full fingerprint, the full keyid, or the short
        # (last eight characters) keyid.
        is_match = (candidate['fingerprint'] == fingerprint
                    or candidate['keyid'] == keyid
                    or candidate['keyid'][8:] == keyid)
        if not is_match:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Return False (not an empty dict) when nothing matched.
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    # Identical matching logic to get_key(), but over the secret keyring.
    for candidate in _list_keys(user, gnupghome, secret=True):
        is_match = (candidate['fingerprint'] == fingerprint
                    or candidate['keyid'] == keyid
                    or candidate['keyid'][8:] == keyid)
        if not is_match:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file
    text
        The text containing to import.
    filename
        The filename containing the key to import.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        # When a filename is given, its contents replace any ``text`` value.
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # python-gnupg >= 1.3.1 reports results through a ``counts`` dict;
    # older releases expose the same numbers as attributes.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            # Nothing was processed at all (e.g. unparsable input).
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain
    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.
    secret
        Export the secret key identified by the ``keyids`` information passed.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    gpg = _create_gpg(user, gnupghome)
    # Normalise a comma separated string to the list python-gnupg expects.
    if isinstance(keyids, six.string_types):
        key_list = keyids.split(',')
    else:
        key_list = keyids
    return gpg.export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    # Accept either a list or a comma separated string of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Translate python-gnupg's per-key result records to readable messages.
    for outcome in recv_data.results:
        if 'ok' in outcome:
            if outcome['ok'] == '1':
                ret['message'].append(
                    'Key {0} added to keychain'.format(outcome['fingerprint']))
            elif outcome['ok'] == '0':
                ret['message'].append(
                    'Key {0} already exists in keychain'.format(outcome['fingerprint']))
        elif 'problem' in outcome:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain
    keyid
        The keyid of the key to set the trust level for.
    fingerprint
        The fingerprint of the key to set the trust level for.
    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed '<fingerprint>:<level>' to gpg --import-ownertrust on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The special 'salt' keychain lives under the config dir and is
        # manipulated as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if res['retcode'] != 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new trust values as digits on stderr.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            elif _match:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
            else:
                # No trust digits found in the output: report it verbatim
                # instead of raising IndexError on _match[0] as before.
                ret['message'] = res['stderr']
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.
    text
        The text to sign.
    filename
        The filename to sign.
    output
        The filename where the signed file will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        # The passphrase comes from pillar, never from the CLI.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg renamed sign()'s key-selection keyword from ``keyid`` to
    # ``default_key`` in 1.3.1; compare LooseVersion objects explicitly.
    new_api = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
    if text:
        if new_api:
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if new_api:
                # Bug fix: this branch previously called gpg.sign(text, ...),
                # signing None instead of the file's contents.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # Write the armored signature to the requested path instead of
            # only returning it.
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file
    text
        The text to verify.
    filename
        The filename to verify.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    signature
        Specify the filename of a detached signature.
        .. versionadded:: 2018.3.0
    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto
        .. versionadded:: fluorine
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    # Bug fix: gnupghome was accepted but never forwarded, so verification
    # always ran against the default keyring.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias of log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints for those recipient whom the data is being encrypted for.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a dict, while
        # sign() uses the value returned by pillar.get directly -- confirm
        # which pillar shape ({'gpg_passphrase': ...} vs a plain string)
        # is actually expected here.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        # NOTE(review): the text branch ignores ``output`` and ``sign``;
        # only the filename branch honors them.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        # Log the underlying gpg error for troubleshooting.
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): same dict-indexing assumption as encrypt(); sign()
        # uses the pillar value directly -- confirm the expected pillar shape.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        # Log the underlying gpg error for troubleshooting.
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
_search_keys
|
python
|
def _search_keys(text, keyserver, user=None):
'''
Helper function for searching keys from keyserver
'''
gpg = _create_gpg(user)
if keyserver:
_keys = gpg.search_keys(text, keyserver)
else:
_keys = gpg.search_keys(text)
return _keys
|
Helper function for searching keys from keyserver
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L200-L209
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Single-letter trust codes from gpg's key listings -> readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Trust level names accepted by trust_key() -> numeric values understood
# by 'gpg --import-ownertrust'.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT, used when reporting trust changes back.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Numeric trust levels reported on signature verification -> labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    # python-gnupg changed several API details at 1.3.1 (constructor
    # keyword, import/sign result shapes); many functions branch on this.
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary (or None when not found).
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Load the module only when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Resolve account information for ``user`` via the ``user.info``
    execution function; defaults to the user the Salt process runs as.
    '''
    if not user:
        # Fall back to the account Salt itself is running under.
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user != 'salt':
        raise SaltInvocationError('User {0} does not exist'.format(user))
    # Special case: the 'salt' account may not exist on the system, in
    # which case we resolve the account Salt is running as instead.
    return _get_user_info()
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory path for ``user``.
    The special 'salt' user maps to <config_dir>/gpgkeys; everyone else
    gets ~/.gnupg.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily chown the target user's GnuPG home to the
    account Salt runs as, call ``func``, then hand ownership back.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        # NOTE(review): if func raises here, ownership is NOT restored to
        # the target user -- there is no try/finally around this call.
        ret = func(*args, **kwargs)
        # Hand ownership of the GnuPG home back to the target user.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a gnupg.GPG handle rooted at the appropriate GnuPG home
    directory for ``user`` (or the explicit ``gnupghome``).
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg renamed the constructor keyword at 1.3.1.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return python-gnupg's raw key listing (secret keyring when
    ``secret`` is True).
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver
    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 does not expose a compatible search API.
        # (Message grammar fixed: "is not support" -> "is not supported".)
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    # Reduce each raw result to the fields callers care about, formatting
    # epoch timestamps as ISO dates.
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_keys
    '''
    keys = []
    # Reduce each raw listing entry to a readable dict; optional fields
    # are only included when gpg reported them.
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_secret_keys
    '''
    keys = []
    # Same reduction as list_keys(), but over the secret keyring.
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain
    .. note::
        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.
        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.
    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.
    key_length
        The length of the primary key in bits.
    name_real
        The real name of the user identity which is represented by the key.
    name_comment
        A comment to attach to the user id.
    name_email
        An email address for the user.
    subkey_type
        The type of the secondary key to generate.
    subkey_length
        The length of the secondary key in bits.
    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Parameters that are always forwarded to python-gnupg's gen_key_input().
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    # Optional parameters are only added when explicitly supplied so that
    # python-gnupg applies its own defaults otherwise.
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is read from pillar, never taken as a CLI argument,
        # which keeps it out of the shell history and process list.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    # Blocking call; key generation may take a long time while the system
    # gathers entropy (see the note in the docstring).
    key = gpg.gen_key(input_data)
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    # Look the key up in the same keyring used for deletion; previously
    # ``gnupghome`` was dropped here, so a key held only in a custom
    # keyring was reported as 'Key not available in keychain.'
    key = get_key(keyid, fingerprint, user, gnupghome)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user, gnupghome)
        if skey and not delete_secret:
            # GnuPG refuses to delete a public key while its secret
            # counterpart is still present.
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)

        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # Match on full fingerprint, long keyid, or the trailing short keyid.
        matched = (candidate['fingerprint'] == fingerprint or
                   candidate['keyid'] == keyid or
                   candidate['keyid'][8:] == keyid)
        if not matched:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Returns False (not an empty dict) when nothing matched.
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    result = {}
    for secret in _list_keys(user, gnupghome, secret=True):
        is_match = (secret['fingerprint'] == fingerprint or
                    secret['keyid'] == keyid or
                    secret['keyid'][8:] == keyid)
        if is_match:
            result['keyid'] = secret['keyid']
            result['fingerprint'] = secret['fingerprint']
            result['uids'] = secret['uids']
            if secret.get('expires'):
                result['expires'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(secret['expires'])))
            if secret.get('date'):
                result['created'] = time.strftime(
                    '%Y-%m-%d', time.localtime(float(secret['date'])))
            if secret.get('length'):
                result['keyLength'] = secret['length']
            if secret.get('ownertrust'):
                result['ownerTrust'] = LETTER_TRUST_DICT[secret['ownertrust']]
            if secret.get('trust'):
                result['trust'] = LETTER_TRUST_DICT[secret['trust']]
    if not result:
        return False
    return result
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            # NOTE(review): the file is opened binary, so on Python 3
            # ``lines`` are bytes and ``''.join(lines)`` would raise
            # TypeError -- presumably only exercised on Python 2; verify.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # python-gnupg >= 1.3.1 reports results via a ``counts`` dict; older
    # releases expose the same numbers as plain attributes. The precedence
    # of the message checks is identical in both branches.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    # Accept both a comma-separated string and an actual list of ids.
    if isinstance(keyids, six.string_types):
        key_list = keyids.split(',')
    else:
        key_list = keyids
    return _create_gpg(user, gnupghome).export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Each per-key result reports either an 'ok' status ('1' = imported,
    # '0' = already present) or a 'problem' entry.
    for outcome in recv_data.results:
        if 'ok' in outcome:
            status = outcome['ok']
            if status == '1':
                ret['message'].append('Key {0} added to keychain'.format(outcome['fingerprint']))
            elif status == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(outcome['fingerprint']))
        elif 'problem' in outcome:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'

    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # ``gpg --import-ownertrust`` reads '<fingerprint>:<trust number>'
    # records from stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if res['retcode'] != 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new ownertrust values on stderr; pull the
            # digits out to build a human-friendly message.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            elif len(_match) == 1:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
            else:
                # Previously any other digit count indexed ``_match[0]`` and
                # could raise IndexError; surface the raw stderr instead.
                ret['message'] = res['stderr']
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True

    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg >= 1.3.1 renamed the ``keyid`` keyword to ``default_key``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Previously this branch called gpg.sign(text, ...) with
                # ``text`` being None here, so the file contents were never
                # actually signed on newer python-gnupg. Sign the stream.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # ``signed_data.data`` is a bytestring; write the output file in
            # binary mode so this also works on Python 3.
            with salt.utils.files.flopen(output, 'wb') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    # Honor an explicit gnupghome; previously the parameter was accepted
    # but silently ignored, so verification always used the default keyring.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a mapping
        # (``{'gpg_passphrase': ...}``), whereas create_key/sign use the
        # pillar value directly -- confirm which pillar layout is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            # NOTE(review): the ``sign`` argument is not forwarded on this
            # code path -- verify whether that is intentional.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): indexes the pillar value as a mapping
        # (``{'gpg_passphrase': ...}``), matching encrypt() but differing
        # from create_key/sign -- confirm the intended pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
search_keys
|
python
|
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    # The >= 1.3.1 python-gnupg fork lacks a compatible search API.
    if GPG_1_3_1:
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    results = []
    for raw_key in _search_keys(text, keyserver or 'pgp.mit.edu', user):
        entry = {'keyid': raw_key['keyid'],
                 'uids': raw_key['uids']}
        if raw_key.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw_key['expires'])))
        if raw_key.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw_key['date'])))
        if raw_key.get('length'):
            entry['keyLength'] = raw_key['length']
        results.append(entry)
    return results
|
Search keys from keyserver
text
Text to search the keyserver for, e.g. email address, keyID or fingerprint.
keyserver
Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
CLI Example:
.. code-block:: bash
salt '*' gpg.search_keys user@example.com
salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L212-L262
|
[
"def _search_keys(text, keyserver, user=None):\n '''\n Helper function for searching keys from keyserver\n '''\n gpg = _create_gpg(user)\n if keyserver:\n _keys = gpg.search_keys(text, keyserver)\n else:\n _keys = gpg.search_keys(text)\n return _keys\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = 'gpg'

# Single-letter validity/trust codes (as listed by python-gnupg's
# list_keys()) mapped to human-readable descriptions.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}

# Trust level names accepted by trust_key() mapped to the numeric
# ownertrust values consumed by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}

# Inverse of NUM_TRUST_DICT, used to render gpg's numeric ownertrust
# output back into readable form in trust_key().
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}

# Numeric trust_level values from python-gnupg's verify() result object
# mapped to readable names (used by verify()).
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}

# Capability probe: GPG_1_3_1 marks the API-incompatible python-gnupg
# fork (>= 1.3.1), which renamed several keyword arguments and changed
# result-object attributes; branches throughout this module key off it.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Locate the ``gpg`` executable.

    Returns the absolute path to the binary, or ``None`` when it is not
    found on the current ``PATH``.
    '''
    gpg_path = salt.utils.path.which('gpg')
    return gpg_path
def __virtual__():
    '''
    Only load the module when both the ``gpg`` binary and the
    python-gnupg bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_info(user=None):
    '''
    Thin wrapper around the ``user.info`` execution function.

    Falls back to the user the Salt process runs as when *user* is not
    given, and also when the special ``salt`` user does not exist.
    '''
    if not user:
        user = __salt__['config.option']('user')

    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo

    if user != 'salt':
        raise SaltInvocationError('User {0} does not exist'.format(user))

    # The dedicated ``salt`` user is optional; fall back to the user the
    # process is actually running as.
    return _get_user_info()
def _get_user_gnupghome(user):
    '''
    Return default GnuPG home directory path for a user.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; any other
    user gets ``~/.gnupg`` under their home directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily chown the target GnuPG home directory to the
    user running the Salt process, run the wrapped function, then chown
    the files back to the requested user.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')

        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)

        userinfo = _get_user_info(user)
        run_user = _get_user_info()

        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)

        # Filter special kwargs (double-underscore keys injected by the
        # loader/state system must not reach the wrapped function).
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]

        ret = func(*args, **kwargs)

        # Restore the original user's ownership, including any files the
        # wrapped call may have created inside the GnuPG home.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)

        return ret

    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance pointed at the user's keyring.
    '''
    homedir = gnupghome or _get_user_gnupghome(user)
    # The >= 1.3.1 python-gnupg fork renamed the constructor keyword from
    # ``gnupghome`` to ``homedir``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=homedir)
    return gnupg.GPG(gnupghome=homedir)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Helper returning the raw key listing from the user's keyring.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Helper querying a keyserver for keys matching *text*.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    public_keys = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional fields are only included when gpg reported them.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        public_keys.append(entry)
    return public_keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    secret_keys = []
    for raw_key in _list_keys(user, gnupghome, secret=True):
        record = {'keyid': raw_key['keyid'],
                  'fingerprint': raw_key['fingerprint'],
                  'uids': raw_key['uids']}
        # Optional fields are only included when gpg reported them.
        if raw_key.get('expires'):
            record['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw_key['expires'])))
        if raw_key.get('date'):
            record['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw_key['date'])))
        if raw_key.get('length'):
            record['keyLength'] = raw_key['length']
        if raw_key.get('ownertrust'):
            record['ownerTrust'] = LETTER_TRUST_DICT[raw_key['ownertrust']]
        if raw_key.get('trust'):
            record['trust'] = LETTER_TRUST_DICT[raw_key['trust']]
        secret_keys.append(record)
    return secret_keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from the ``gpg_passphrase`` Pillar key.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Parameters handed straight to python-gnupg's gen_key_input();
    # optional settings are only added when explicitly requested.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is sourced from Pillar rather than the CLI so it
        # never shows up in process listings or the job cache arguments.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    # gen_key() blocks until GnuPG has gathered enough entropy -- this is
    # the slow step referenced in the docstring note above.
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.
    fingerprint
        The fingerprint of the key to be deleted.
    delete_secret
        Whether to delete a corresponding secret key prior to deleting the
        public key. Secret keys must be deleted before deleting any
        corresponding public keys.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    # Pass gnupghome through so lookups hit the same keyring being modified.
    key = get_key(keyid, fingerprint, user, gnupghome)
    if not key:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
    fingerprint = key['fingerprint']
    skey = get_secret_key(keyid, fingerprint, user, gnupghome)
    if skey:
        if not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        # The secret key must go before the public key; abort on failure
        # instead of silently continuing.
        if six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        else:
            ret['res'] = False
            ret['message'] = 'Failed to delete secret key for {0}'.format(fingerprint)
            return ret
    if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
        ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
    else:
        # Previously a failed public-key deletion still returned res=True.
        ret['res'] = False
        ret['message'] += 'Failed to delete public key for {0}'.format(fingerprint)
    return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # Match by full fingerprint, full keyid, or the short (last eight
        # characters) form of the keyid.
        is_match = (candidate['fingerprint'] == fingerprint or
                    candidate['keyid'] == keyid or
                    candidate['keyid'][8:] == keyid)
        if not is_match:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Return False (not an empty dict) when nothing matched.
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for candidate in _list_keys(user, gnupghome, secret=True):
        # Match by full fingerprint, full keyid, or the short (last eight
        # characters) form of the keyid.
        is_match = (candidate['fingerprint'] == fingerprint or
                    candidate['keyid'] == keyid or
                    candidate['keyid'][8:] == keyid)
        if not is_match:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Return False (not an empty dict) when nothing matched.
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.
    filename
        The filename containing the key to import.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                contents = _fp.readlines()
            text = ''.join(contents)
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The >=1.3.1 bindings report results through a ``counts`` dict; older
    # versions expose the same values as attributes on the result object.
    if GPG_1_3_1:
        counts = imported_data.counts
        was_imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        failed = counts.get('not_imported')
        total = counts.get('count')
    else:
        was_imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        failed = imported_data.not_imported
        total = imported_data.count
    if was_imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif failed or not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a
        comma separated string or a list. Anything GnuPG itself accepts to
        identify a key — for example, the key ID or the fingerprint — works.
    secret
        Export the secret key identified by the ``keyids`` information passed.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept either a comma-separated string or a list of key identifiers.
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a
        comma separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    for entry in recv_data.results:
        # Entries carry either an 'ok' status ('1' = imported,
        # '0' = already present) or a 'problem' marker.
        if 'ok' in entry:
            if entry['ok'] == '1':
                ret['message'].append(
                    'Key {0} added to keychain'.format(entry['fingerprint']))
            elif entry['ok'] == '0':
                ret['message'].append(
                    'Key {0} already exists in keychain'.format(entry['fingerprint']))
        elif 'problem' in entry:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.
    fingerprint
        The fingerprint of the key to set the trust level for.
    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keyid=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # ``gpg --import-ownertrust`` reads "<fingerprint>:<numeric level>" lines
    # from stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if res['retcode'] != 0:
        ret['res'] = False
        ret['message'] = res['stderr']
        return ret
    # gpg reports ownertrust changes on stderr; pull out the numeric trust
    # levels it mentions. Two digits means "changed from X to Y", one digit
    # means the trust was (re)set.
    _match = re.findall(r'\d', res['stderr']) if res['stderr'] else []
    if len(_match) == 2:
        ret['fingerprint'] = fingerprint
        ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
            INV_NUM_TRUST_DICT[_match[0]],
            INV_NUM_TRUST_DICT[_match[1]]
        )
    elif _match:
        ret['fingerprint'] = fingerprint
        ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
    else:
        # No digits found (previously this raised IndexError): fall back to
        # whatever gpg printed.
        ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    keyid
        The keyid of the key to sign with, defaults to the first key in the
        secret keyring.
    text
        The text to sign.
    filename
        The filename to sign.
    output
        The filename where the signed file will be written, default is
        standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there.  How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # The >=1.3.1 bindings take the signing key as ``default_key``; older
    # versions call the parameter ``keyid``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # Fixed: this branch previously called gpg.sign(text, ...) for
            # gnupg >= 1.3.1, signing ``text`` which is always None here,
            # instead of the file contents.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # Write the signed payload to the requested output path.
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.
    filename
        The filename to verify.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0
    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there.  How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' signature='/path/to/important.file.sig'
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    # Fixed: gnupghome was accepted but never forwarded to _create_gpg, so a
    # custom keyring location was silently ignored.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias for log.warning.
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints for those recipient whom the data is being encrypted for.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.encrypt text='Hello there.  How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): unlike sign()/create_key(), the pillar value is
        # indexed again with ['gpg_passphrase'] here, which implies the
        # pillar data is a mapping; if it is a plain string this raises
        # TypeError — confirm the expected pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        # NOTE(review): the text branch does not forward ``sign`` or
        # ``output`` — confirm whether that is intentional.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: return just the encrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): unlike sign()/create_key(), the pillar value is
        # indexed again with ['gpg_passphrase'] here, which implies the
        # pillar data is a mapping; if it is a plain string this raises
        # TypeError — confirm the expected pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: return just the decrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
list_keys
|
python
|
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    formatted = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch timestamps are rendered as ISO dates; trust codes are mapped
        # to human-readable descriptions.
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        formatted.append(entry)
    return formatted
|
List keys in GPG keychain
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.list_keys
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L265-L309
|
[
"def _list_keys(user=None, gnupghome=None, secret=False):\n '''\n Helper function for Listing keys\n '''\n gpg = _create_gpg(user, gnupghome)\n _keys = gpg.list_keys(secret)\n return _keys\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map the single-letter trust codes that appear in GnuPG key listings to
# human-readable descriptions.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by trust_key() to the numeric values
# consumed by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT: numeric ownertrust values back to display names.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Trust levels reported on python-gnupg verification results (see verify()).
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg bindings are >= 1.3.1, whose API
# differs from older versions (keyword names, result objects — see
# _create_gpg and the GPG_1_3_1 checks throughout this module).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    # python-gnupg is optional at import time; __virtual__ reports the error.
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary, or None when it is not on PATH.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Load the module only when both the gpg binary and python-gnupg exist.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Resolve account information via the ``user.info`` execution module.

    Defaults to the user the Salt process runs as. A missing ``salt`` user
    falls back to the running user; any other missing user raises
    SaltInvocationError.
    '''
    if not user:
        # No user given: look up the account Salt is running as.
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user != 'salt':
        raise SaltInvocationError('User {0} does not exist'.format(user))
    # Special case: the 'salt' user may not exist; use the running user.
    return _get_user_info()
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory path for ``user``.

    The special user ``salt`` maps to ``<config_dir>/gpgkeys``; everyone
    else gets ``~/.gnupg``.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    # Decorator for keychain-mutating functions that may target a different
    # user's GnuPG home directory than the one Salt runs as: temporarily
    # chowns the directory to the running user, calls the function, then
    # chowns it back.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (loader-injected '__pub_*' style keys) so
        # they are not forwarded to the wrapped function.
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Hand ownership of the keyring back to the target user afterwards.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Instantiate a gnupg.GPG object for the given user/home directory.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 names the keyword argument ``homedir``; older
    # versions use ``gnupghome``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key records from the user's keyring
    (secret keys when ``secret`` is True).
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver (or the bindings' default one) for keys matching
    ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or
        fingerprint.
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # The >=1.3.1 bindings do not support keyserver searches.
        # Fixed error-message grammar ("is not support" -> "is not supported").
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        # Epoch timestamps are rendered as ISO dates.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    formatted = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch timestamps are rendered as ISO dates; trust codes are mapped
        # to human-readable descriptions.
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        formatted.append(entry)
    return formatted
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::
        GPG key generation requires *a lot* of entropy and randomness, which
        is hard to come by over a remote connection or on a virtual machine.
        Consider running an entropy daemon such as ``rng-tools``. Because
        generation can be slow, increasing the timeout (e.g. ``-t 15``) may
        be necessary.

    key_type
        The type of the primary key to generate; must be capable of signing
        ('RSA' or 'DSA').
    key_length
        The length of the primary key in bits.
    name_real
        The real name of the user identity represented by the key.
    name_comment
        A comment to attach to the user id.
    name_email
        An email address for the user.
    subkey_type
        The type of the secondary key to generate.
    subkey_length
        The length of the secondary key in bits.
    expire_date
        Expiration for the primary and any secondary key: an ISO date, a
        number of days/weeks/months/years, an epoch value, or 0 for a
        non-expiring key.
    use_passphrase
        Whether to protect the key with a passphrase taken from the
        ``gpg_passphrase`` pillar value.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {'res': True, 'fingerprint': '', 'message': ''}
    gpg = _create_gpg(user, gnupghome)
    params = {
        'key_type': key_type,
        'key_length': key_length,
        'name_real': name_real,
        'name_comment': name_comment,
    }
    # Optional generation parameters are only forwarded when set.
    if name_email:
        params['name_email'] = name_email
    if subkey_type:
        params['subkey_type'] = subkey_type
    if subkey_length:
        params['subkey_length'] = subkey_length
    if expire_date:
        params['expire_date'] = expire_date
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        params['passphrase'] = gpg_passphrase
    generated = gpg.gen_key(gpg.gen_key_input(**params))
    if generated.fingerprint:
        ret['fingerprint'] = generated.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        if skey and not delete_secret:
            # GnuPG refuses to remove a public key while its secret key
            # exists, so bail out unless the caller opted in.
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        # NOTE(review): if the secret-key deletion above fails, this public
        # deletion is still attempted and ``message`` stays empty while
        # ``res`` is set True -- confirm whether that is intended.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # Match on fingerprint, full keyid, or the short (last 8 chars) keyid.
        matches = (candidate['fingerprint'] == fingerprint
                   or candidate['keyid'] == keyid
                   or candidate['keyid'][8:] == keyid)
        if not matches:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # False signals "not found"; otherwise the dict of key attributes.
    return found or False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, full keyid, or the short (last 8 chars) keyid.
        matches = (candidate['fingerprint'] == fingerprint
                   or candidate['keyid'] == keyid
                   or candidate['keyid'][8:] == keyid)
        if not matches:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # False signals "not found"; otherwise the dict of key attributes.
    return found or False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        # ``filename`` takes precedence: its contents replace ``text``.
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    if GPG_1_3_1:
        # The python-gnupg 1.3.1+ fork reports the import outcome through a
        # ``counts`` dict instead of the attributes used below.
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            # Nothing was processed at all -- treat as failure.
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            # Nothing was processed at all -- treat as failure.
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    gpg = _create_gpg(user, gnupghome)
    # Accept either a comma-separated string or an already-split list.
    key_list = keyids.split(',') if isinstance(keyids, six.string_types) else keyids
    return gpg.export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }

    gpg = _create_gpg(user, gnupghome)

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    # Accept either a comma-separated string or an already-split list.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')

    recv_data = gpg.recv_keys(keyserver, *keys)
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                # '1' => key was newly imported
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                # '0' => key was already present
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    # keyid and fingerprint are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # Feed "<fingerprint>:<numeric trust>" to ``gpg --import-ownertrust``
    # on stdin rather than putting the fingerprint on the command line.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user

    if user == 'salt':
        # The special ``salt`` user keeps its keyring under the config dir
        # and the command must run as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old and new trust values as digits in stderr;
            # two digits => trust changed, otherwise trust was (re)set.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): assumes stderr contains at least one digit
                # here; if gpg emits none this raises IndexError -- confirm.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there.  How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True

    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        # The passphrase is sourced from pillar so it never appears on the
        # command line.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        # The python-gnupg 1.3.1+ fork selects the signing key via
        # ``default_key``; upstream python-gnupg uses ``keyid``.
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # BUGFIX: this branch previously called ``gpg.sign(text, ...)``
            # on python-gnupg >= 1.3.1 with ``text`` being None, so the file
            # contents were never signed.  Sign the open file object instead.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # NOTE(review): ``signed_data.data`` is bytes on Python 3 while
            # the file is opened in text mode -- confirm on Python 3.
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there.  How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # FIX: log.warn is a deprecated alias of log.warning.
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there.  How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): assumes the pillar value is a dict with a
        # ``gpg_passphrase`` key (unlike create_key/sign, which use the
        # value directly) -- confirm the expected pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: return just the armored ciphertext.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): assumes the pillar value is a dict with a
        # ``gpg_passphrase`` key (matches encrypt(), differs from
        # create_key/sign) -- confirm the expected pillar layout.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: return just the decrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
create_key
|
python
|
def create_key(key_type='RSA',
key_length=1024,
name_real='Autogenerated Key',
name_comment='Generated by SaltStack',
name_email=None,
subkey_type=None,
subkey_length=None,
expire_date=None,
use_passphrase=False,
user=None,
gnupghome=None):
'''
Create a key in the GPG keychain
.. note::
GPG key generation requires *a lot* of entropy and randomness.
Difficult to do over a remote connection, consider having
another process available which is generating randomness for
the machine. Also especially difficult on virtual machines,
consider the `rng-tools
<http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
package.
The create_key process takes awhile so increasing the timeout
may be necessary, e.g. -t 15.
key_type
The type of the primary key to generate. It must be capable of signing.
'RSA' or 'DSA'.
key_length
The length of the primary key in bits.
name_real
The real name of the user identity which is represented by the key.
name_comment
A comment to attach to the user id.
name_email
An email address for the user.
subkey_type
The type of the secondary key to generate.
subkey_length
The length of the secondary key in bits.
expire_date
The expiration date for the primary and any secondary key.
You can specify an ISO date, A number of days/weeks/months/years,
an epoch value, or 0 for a non-expiring key.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt -t 15 '*' gpg.create_key
'''
ret = {
'res': True,
'fingerprint': '',
'message': ''
}
create_params = {'key_type': key_type,
'key_length': key_length,
'name_real': name_real,
'name_comment': name_comment,
}
gpg = _create_gpg(user, gnupghome)
if name_email:
create_params['name_email'] = name_email
if subkey_type:
create_params['subkey_type'] = subkey_type
if subkey_length:
create_params['subkey_length'] = subkey_length
if expire_date:
create_params['expire_date'] = expire_date
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
ret['res'] = False
ret['message'] = "gpg_passphrase not available in pillar."
return ret
else:
create_params['passphrase'] = gpg_passphrase
input_data = gpg.gen_key_input(**create_params)
key = gpg.gen_key(input_data)
if key.fingerprint:
ret['fingerprint'] = key.fingerprint
ret['message'] = 'GPG key pair successfully generated.'
else:
ret['res'] = False
ret['message'] = 'Unable to generate GPG key pair.'
return ret
|
Create a key in the GPG keychain
.. note::
GPG key generation requires *a lot* of entropy and randomness.
Difficult to do over a remote connection, consider having
another process available which is generating randomness for
the machine. Also especially difficult on virtual machines,
consider the `rng-tools
<http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
package.
The create_key process takes a while, so increasing the timeout
may be necessary, e.g. -t 15.
key_type
The type of the primary key to generate. It must be capable of signing.
'RSA' or 'DSA'.
key_length
The length of the primary key in bits.
name_real
The real name of the user identity which is represented by the key.
name_comment
A comment to attach to the user id.
name_email
An email address for the user.
subkey_type
The type of the secondary key to generate.
subkey_length
The length of the secondary key in bits.
expire_date
The expiration date for the primary and any secondary key.
You can specify an ISO date, A number of days/weeks/months/years,
an epoch value, or 0 for a non-expiring key.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt -t 15 '*' gpg.create_key
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L360-L477
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Mapping of GnuPG single-letter validity/trust codes to readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}

# Trust level names accepted by trust_key() mapped to the numeric values
# understood by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}

# Reverse of NUM_TRUST_DICT, used when reporting ownertrust changes.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}

# Trust levels reported by signature verification (verified.trust_level).
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}

GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    # Several functions branch on this flag: the python-gnupg >= 1.3.1 fork
    # has a different API (e.g. ``homedir`` instead of ``gnupghome``).
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the ``gpg`` binary (None if not on PATH).
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Make sure that python-gnupg and the gpg binary are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Wrapper for the ``user.info`` Salt function.

    Returns the ``user.info`` dict for ``user``; when ``user`` is not given
    it defaults to the user the Salt process runs as.  Raises
    SaltInvocationError for unknown users, except the special ``salt`` user
    which falls back to the running user.
    '''
    if not user:
        # Get user Salt running as
        user = __salt__['config.option']('user')

    userinfo = __salt__['user.info'](user)

    if not userinfo:
        if user == 'salt':
            # Special case with `salt` user:
            # if it doesn't exist then fall back to user Salt running as
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))

    return userinfo
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory path for a user.

    The special ``salt`` user keeps its keyring under the Salt config
    directory; everyone else uses ``~/.gnupg``.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator that temporarily chowns a user's GnuPG home directory to the
    user the Salt process runs as, calls the wrapped function, and then
    restores the original ownership.  Needed because the Salt process must
    be able to read/write another user's keyring while acting on it.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')

        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)

        userinfo = _get_user_info(user)
        run_user = _get_user_info()

        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)

        # Filter special kwargs
        # (double-underscore kwargs injected by the Salt loader must not be
        # forwarded to the wrapped function).
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]

        ret = func(*args, **kwargs)

        if userinfo['uid'] != run_user['uid']:
            # Hand ownership of the keyring back to the original user.
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)

        return ret

    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build and return a ``gnupg.GPG`` instance for the given user/homedir.
    '''
    home = gnupghome if gnupghome else _get_user_gnupghome(user)
    # The python-gnupg >= 1.3.1 fork renamed the constructor keyword from
    # ``gnupghome`` to ``homedir``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Helper returning the raw key list from the given user's keyring.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Helper searching a keyserver (or the gnupg default) for ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    if GPG_1_3_1:
        # The python-gnupg >= 1.3.1 fork does not support keyserver search.
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    else:
        if not keyserver:
            keyserver = 'pgp.mit.edu'

        _keys = []
        for _key in _search_keys(text, keyserver, user):
            # Normalize each raw result into a reporting-friendly dict.
            tmp = {'keyid': _key['keyid'],
                   'uids': _key['uids']}

            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)

            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            _keys.append(tmp)
        return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    results = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional fields: only included when the backend reports them
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Trust letters are translated to human-readable labels
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    results = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional fields: only included when the backend reports them
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Trust letters are translated to human-readable labels
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
@_restore_ownership
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        # Always delete by full fingerprint even when selected via keyid
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        if skey and not delete_secret:
            # Refuse to orphan a secret key; caller must opt in explicitly
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        # NOTE(review): if the secret-key deletion above returns anything other
        # than 'ok', control still falls through to the public-key deletion
        # below with no error reported -- confirm this is intended.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on fingerprint, long keyid, or short (last 8 chars) keyid
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Returns False (not an empty dict) when no key matched
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, long keyid, or short (last 8 chars) keyid
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Returns False (not an empty dict) when no key matched
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                text = ''.join(_fp.readlines())
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The two supported python-gnupg variants expose import statistics
    # differently: a 'counts' dict on the 1.3.1 fork, plain attributes
    # on upstream.
    if GPG_1_3_1:
        counts = imported_data.counts
        imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        failed = counts.get('not_imported')
        total = counts.get('count')
    else:
        imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        failed = imported_data.not_imported
        total = imported_data.count
    if imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif failed or not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # A comma-separated string is normalized to a list of key IDs
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    return _create_gpg(user, gnupghome).export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Message templates keyed by the 'ok' status code reported per key
    templates = {'1': 'Key {0} added to keychain',
                 '0': 'Key {0} already exists in keychain'}
    for result in recv_data.results:
        if 'ok' in result:
            tmpl = templates.get(result['ok'])
            if tmpl:
                ret['message'].append(tmpl.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to its full fingerprint via the keychain
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<numeric trust>" to gpg --import-ownertrust on stdin
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The special 'salt' user operates on /etc/salt/gpgkeys as root
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new numeric trust values on stderr; pull
            # the digits out and translate them to readable labels.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): this branch assumes at least one digit was
                # found; an empty _match would raise IndexError -- confirm
                # gpg always emits a digit on this path.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            # NOTE(review): stderr is empty here, so this sets an empty
            # message -- presumably intentional, but worth confirming.
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg >= 1.3.1 (the isislovecruft fork) renamed the 'keyid'
    # keyword argument to 'default_key'
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # BUG FIX: the >= 1.3.1 branch previously called
            # gpg.sign(text, ...) with text=None instead of signing the
            # opened file; it must use sign_file like the other branch.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # FIX: log.warn is a deprecated alias; use log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    # A non-None trust_level means the signature checked out
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): here (unlike in sign()) the pillar value is assumed
        # to be a dict keyed by 'gpg_passphrase' -- confirm which shape the
        # pillar actually provides.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        # NOTE(review): the 'sign' and 'output' parameters are silently
        # ignored on this text path -- confirm whether they should be
        # forwarded to gpg.encrypt().
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            # NOTE(review): 'sign' is not forwarded on this branch either.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if not result.ok:
        log.error(result.stderr)
        if bare:
            return False
        ret['res'] = False
        ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        return ret
    if bare:
        return result.data
    if output:
        ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
    else:
        ret['comment'] = result.data
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
delete_key
|
python
|
def delete_key(keyid=None,
fingerprint=None,
delete_secret=False,
user=None,
gnupghome=None):
'''
Get a key from the GPG keychain
keyid
The keyid of the key to be deleted.
fingerprint
The fingerprint of the key to be deleted.
delete_secret
Whether to delete a corresponding secret key prior to deleting the public key.
Secret keys must be deleted before deleting any corresponding public keys.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.delete_key keyid=3FAD9F1E
salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
salt '*' gpg.delete_key keyid=3FAD9F1E user=username
salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
'''
ret = {
'res': True,
'message': ''
}
if fingerprint and keyid:
ret['res'] = False
ret['message'] = 'Only specify one argument, fingerprint or keyid'
return ret
if not fingerprint and not keyid:
ret['res'] = False
ret['message'] = 'Required argument, fingerprint or keyid'
return ret
gpg = _create_gpg(user, gnupghome)
key = get_key(keyid, fingerprint, user)
if key:
fingerprint = key['fingerprint']
skey = get_secret_key(keyid, fingerprint, user)
if skey and not delete_secret:
ret['res'] = False
ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
return ret
elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
# Delete the secret key
ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
# Delete the public key
if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
ret['res'] = True
return ret
else:
ret['res'] = False
ret['message'] = 'Key not available in keychain.'
return ret
|
Get a key from the GPG keychain
keyid
The keyid of the key to be deleted.
fingerprint
The fingerprint of the key to be deleted.
delete_secret
Whether to delete a corresponding secret key prior to deleting the public key.
Secret keys must be deleted before deleting any corresponding public keys.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.delete_key keyid=3FAD9F1E
salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
salt '*' gpg.delete_key keyid=3FAD9F1E user=username
salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L480-L555
|
[
"def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):\n '''\n Get a key from the GPG keychain\n\n keyid\n The key ID (short or long) of the key to be retrieved.\n\n fingerprint\n The fingerprint of the key to be retrieved.\n\n user\n Which user's keychain to access, defaults to user Salt is running as.\n Passing the user as ``salt`` will set the GnuPG home directory to the\n ``/etc/salt/gpgkeys``.\n\n gnupghome\n Specify the location where GPG keyring and related files are stored.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' gpg.get_key keyid=3FAD9F1E\n\n salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192\n\n salt '*' gpg.get_key keyid=3FAD9F1E user=username\n\n '''\n tmp = {}\n for _key in _list_keys(user, gnupghome):\n if (_key['fingerprint'] == fingerprint or\n _key['keyid'] == keyid or\n _key['keyid'][8:] == keyid):\n tmp['keyid'] = _key['keyid']\n tmp['fingerprint'] = _key['fingerprint']\n tmp['uids'] = _key['uids']\n\n expires = _key.get('expires', None)\n date = _key.get('date', None)\n length = _key.get('length', None)\n owner_trust = _key.get('ownertrust', None)\n trust = _key.get('trust', None)\n\n if expires:\n tmp['expires'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['expires'])))\n if date:\n tmp['created'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['date'])))\n if length:\n tmp['keyLength'] = _key['length']\n if owner_trust:\n tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]\n if trust:\n tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]\n if not tmp:\n return False\n else:\n return tmp\n",
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n",
"def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):\n '''\n Get a key from the GPG keychain\n\n keyid\n The key ID (short or long) of the key to be retrieved.\n\n fingerprint\n The fingerprint of the key to be retrieved.\n\n user\n Which user's keychain to access, defaults to user Salt is running as.\n Passing the user as ``salt`` will set the GnuPG home directory to the\n ``/etc/salt/gpgkeys``.\n\n gnupghome\n Specify the location where GPG keyring and related files are stored.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' gpg.get_secret_key keyid=3FAD9F1E\n\n salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192\n\n salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username\n\n '''\n tmp = {}\n for _key in _list_keys(user, gnupghome, secret=True):\n if (_key['fingerprint'] == fingerprint or\n _key['keyid'] == keyid or\n _key['keyid'][8:] == keyid):\n tmp['keyid'] = _key['keyid']\n tmp['fingerprint'] = _key['fingerprint']\n tmp['uids'] = _key['uids']\n\n expires = _key.get('expires', None)\n date = _key.get('date', None)\n length = _key.get('length', None)\n owner_trust = _key.get('ownertrust', None)\n trust = _key.get('trust', None)\n\n if expires:\n tmp['expires'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['expires'])))\n if date:\n tmp['created'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['date'])))\n if length:\n tmp['keyLength'] = _key['length']\n if owner_trust:\n tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]\n if trust:\n tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]\n if not tmp:\n return False\n else:\n return tmp\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
LETTER_TRUST_DICT = {
'e': 'Expired',
'q': 'Unknown',
'n': 'Not Trusted',
'f': 'Fully Trusted',
'm': 'Marginally Trusted',
'u': 'Ultimately Trusted',
'-': 'Unknown',
}
NUM_TRUST_DICT = {
'expired': '1',
'unknown': '2',
'not_trusted': '3',
'marginally': '4',
'fully': '5',
'ultimately': '6',
}
INV_NUM_TRUST_DICT = {
'1': 'Expired',
'2': 'Unknown',
'3': 'Not Trusted',
'4': 'Marginally',
'5': 'Fully Trusted',
'6': 'Ultimately Trusted'
}
VERIFY_TRUST_LEVELS = {
'0': 'Undefined',
'1': 'Never',
'2': 'Marginal',
'3': 'Fully',
'4': 'Ultimate'
}
GPG_1_3_1 = False
try:
import gnupg
HAS_GPG_BINDINGS = True
GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary (or None when not found).
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.
    '''
    # Both the gpg binary and the python bindings must be present
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_info(user=None):
    '''
    Look up account information via the ``user.info`` Salt function.

    Defaults to the user the Salt process runs as.  The special ``salt``
    user falls back to the running user when no such account exists.
    '''
    if not user:
        # Get user Salt runnining as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user == 'salt':
        # Special case with `salt` user:
        # if it doesn't exist then fall back to user Salt running as
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
def _get_user_gnupghome(user):
    '''
    Return default GnuPG home directory path for a user.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; any other
    user maps to ``~/.gnupg``.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    # Decorator for gpg functions that may operate on another user's
    # keychain: temporarily chowns the GnuPG home directory to the user
    # running the Salt process, runs the wrapped function, then restores
    # ownership to the target user.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (e.g. __pub_* injected by the loader)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # NOTE(review): the restore pass below runs even when gnupghome did
        # not exist before the call (it may have been created by func) --
        # presumably intended so a freshly created keyring is chowned to
        # the target user; confirm.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Instantiate a ``gnupg.GPG`` object rooted at the appropriate GnuPG
    home directory for ``user`` (or the explicit ``gnupghome``).
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # The >=1.3.1 fork renamed the constructor keyword from gnupghome
    # to homedir.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw python-gnupg key listing for the given keyring.
    Pass ``secret=True`` to list secret keys instead of public ones.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query ``keyserver`` (or python-gnupg's default server when falsy)
    for keys matching ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # The >=1.3.1 python-gnupg fork does not implement key searching.
        # (Message grammar fixed: was "is not support".)
        raise SaltInvocationError(
            'The search_keys function is not supported with this version '
            'of python-gnupg.'
        )
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        # Timestamps come back as epoch strings; render them as dates.
        if _key.get('expires'):
            tmp['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    ret = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch-string timestamps are rendered as human-readable dates.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Trust letters are mapped to readable labels.
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        ret.append(entry)
    return ret
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    ret = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Epoch-string timestamps are rendered as human-readable dates.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Trust letters are mapped to readable labels.
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        ret.append(entry)
    return ret
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::
        GPG key generation requires *a lot* of entropy and randomness;
        consider running an entropy source such as the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package, especially on virtual machines. Key creation can be
        slow, so a larger timeout (e.g. ``-t 15``) may be necessary.

    key_type
        The type of the primary key to generate. It must be capable of
        signing: 'RSA' or 'DSA'.
    key_length
        The length of the primary key in bits.
    name_real
        The real name of the user identity which is represented by the key.
    name_comment
        A comment to attach to the user id.
    name_email
        An email address for the user.
    subkey_type
        The type of the secondary key to generate.
    subkey_length
        The length of the secondary key in bits.
    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, a number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment}
    # Only forward the optional parameters that were actually supplied.
    for opt, val in (('name_email', name_email),
                     ('subkey_type', subkey_type),
                     ('subkey_length', subkey_length),
                     ('expire_date', expire_date)):
        if val:
            create_params[opt] = val
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        create_params['passphrase'] = gpg_passphrase
    key = gpg.gen_key(gpg.gen_key_input(**create_params))
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    for _key in _list_keys(user, gnupghome):
        # Match on full fingerprint, full keyid, or short (last 8 hex
        # digits) keyid.  The loop scans every key; if several match,
        # later fields overwrite earlier ones.
        if not (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            continue
        tmp.update({'keyid': _key['keyid'],
                    'fingerprint': _key['fingerprint'],
                    'uids': _key['uids']})
        if _key.get('expires'):
            tmp['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Return False when nothing matched, mirroring the historical API.
    return tmp if tmp else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    for _key in _list_keys(user, gnupghome, secret=True):
        # Match on full fingerprint, full keyid, or short (last 8 hex
        # digits) keyid.  The loop scans every key; if several match,
        # later fields overwrite earlier ones.
        if not (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            continue
        tmp.update({'keyid': _key['keyid'],
                    'fingerprint': _key['fingerprint'],
                    'uids': _key['uids']})
        if _key.get('expires'):
            tmp['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        if _key.get('ownertrust'):
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if _key.get('trust'):
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Return False when nothing matched, mirroring the historical API.
    return tmp if tmp else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.
    filename
        The filename containing the key to import.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                text = ''.join(_fp.readlines())
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The two binding generations expose import results differently:
    # the >=1.3.1 fork via a `counts` dict, classic bindings via
    # attributes.  Normalize, then evaluate in the same priority order.
    if GPG_1_3_1:
        counts = imported_data.counts
        imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        not_imported = counts.get('not_imported')
        total = counts.get('count')
    else:
        imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        not_imported = imported_data.not_imported
        total = imported_data.count
    if imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif not_imported:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    elif not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint - could be
        used.
    secret
        Export the secret key identified by the ``keyids`` information passed.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    gpg = _create_gpg(user, gnupghome)
    # A comma separated string is normalized to a list of key IDs.
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from a keyserver and add them to the keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    # A comma separated string is normalized to a list of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    for result in recv_data.results:
        # python-gnupg reports per-key status: ok='1' means newly
        # imported, ok='0' means already present.
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.
    fingerprint
        The fingerprint of the key to set the trust level for.
    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # Exactly one of fingerprint/keyid must be provided; a keyid is
    # resolved to a fingerprint via get_key().
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<numeric trust>\n" to gpg --import-ownertrust
    # on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The 'salt' pseudo-user keeps its keyring under the config dir
        # and the command is run as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports old/new trust values on stderr; pull out the
            # digits to build a human-readable message.
            # NOTE(review): if stderr contains no digits, _match[0] below
            # raises IndexError -- confirm gpg's stderr format here.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            # No stderr output: message ends up as the empty string.
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    keyid
        The keyid of the key to sign with, defaults to the
        first key in the secret keyring.
    text
        The text to sign.
    filename
        The filename to sign.
    output
        The filename where the signed file will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        # The >=1.3.1 fork renamed the signing-key keyword to default_key.
        if gnupg_version >= _LooseVersion('1.3.1'):
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # Bug fix: previously the >=1.3.1 branch called
            # gpg.sign(text, ...) with text=None instead of signing the
            # opened file; sign the file stream in both cases.
            if gnupg_version >= _LooseVersion('1.3.1'):
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.
    filename
        The filename to verify.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0
    trustmodel
        Explicitly define the used trust model. One of:
        - pgp
        - classic
        - tofu
        - tofu+pgp
        - direct
        - always
        - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias; use log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints for those recipient whom the data is being encrypted for.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): sign() and create_key() use the pillar value
        # directly, but this subscript assumes the pillar value is itself
        # a dict keyed by 'gpg_passphrase' -- confirm the expected pillar
        # layout; with a plain string value this raises TypeError.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: hand back just the encrypted payload
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): sign() and create_key() use the pillar value
        # directly, but this subscript assumes the pillar value is itself
        # a dict keyed by 'gpg_passphrase' -- confirm the expected pillar
        # layout; with a plain string value this raises TypeError.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: hand back just the decrypted payload
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
get_key
|
python
|
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
'''
Get a key from the GPG keychain
keyid
The key ID (short or long) of the key to be retrieved.
fingerprint
The fingerprint of the key to be retrieved.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.get_key keyid=3FAD9F1E
salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
salt '*' gpg.get_key keyid=3FAD9F1E user=username
'''
tmp = {}
for _key in _list_keys(user, gnupghome):
if (_key['fingerprint'] == fingerprint or
_key['keyid'] == keyid or
_key['keyid'][8:] == keyid):
tmp['keyid'] = _key['keyid']
tmp['fingerprint'] = _key['fingerprint']
tmp['uids'] = _key['uids']
expires = _key.get('expires', None)
date = _key.get('date', None)
length = _key.get('length', None)
owner_trust = _key.get('ownertrust', None)
trust = _key.get('trust', None)
if expires:
tmp['expires'] = time.strftime('%Y-%m-%d',
time.localtime(float(_key['expires'])))
if date:
tmp['created'] = time.strftime('%Y-%m-%d',
time.localtime(float(_key['date'])))
if length:
tmp['keyLength'] = _key['length']
if owner_trust:
tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
if trust:
tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
if not tmp:
return False
else:
return tmp
|
Get a key from the GPG keychain
keyid
The key ID (short or long) of the key to be retrieved.
fingerprint
The fingerprint of the key to be retrieved.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.get_key keyid=3FAD9F1E
salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
salt '*' gpg.get_key keyid=3FAD9F1E user=username
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L558-L617
|
[
"def _list_keys(user=None, gnupghome=None, secret=False):\n '''\n Helper function for Listing keys\n '''\n gpg = _create_gpg(user, gnupghome)\n _keys = gpg.list_keys(secret)\n return _keys\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG's single-letter trust codes (as seen in key listings) to
# human-readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by trust_key() to the numeric
# ownertrust values fed to ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse mapping: numeric ownertrust value -> display label.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Map python-gnupg verify() trust_level integers to display labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg is the >=1.3.1 fork, whose
# constructor/keyword API differs from the classic 0.x bindings
# (see _create_gpg below).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    # python-gnupg missing: __virtual__() will refuse to load the module.
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Locate the ``gpg`` executable on the minion's PATH, or None if
    it is not installed.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Only load this module when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if not HAS_GPG_BINDINGS:
        return (False, 'The gpg execution module cannot be loaded; the '
                'gnupg python module is not installed.')
    return __virtualname__
def _get_user_info(user=None):
    '''
    Fetch account details for ``user`` through Salt's ``user.info``
    function. When no user is given, the account Salt is running as
    is looked up instead.
    '''
    if not user:
        # Default to the user the Salt process runs as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user == 'salt':
        # Special case: a dedicated `salt` account may not exist, so
        # fall back to the account Salt is actually running under.
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
def _get_user_gnupghome(user):
    '''
    Compute the default GnuPG home directory for ``user``. The special
    user ``salt`` maps to ``<config_dir>/gpgkeys``; any other user gets
    ``~/.gnupg`` under their home directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily give ownership of the target user's GnuPG
    home directory to the user running Salt so the wrapped gpg call can
    access the keyring, then restore ownership afterwards.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (Salt injects dunder-prefixed keyword
        # arguments that the wrapped function does not accept)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Hand ownership of the keyring back to the requested user
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` object for the given user/home directory.

    The keyword naming the home directory differs between python-gnupg
    releases: >= 1.3.1 expects ``homedir``, older releases ``gnupghome``.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Helper returning the raw key list from the user's GPG keychain.

    secret
        When True, list secret keys instead of public ones.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Helper that queries a keyserver for keys matching ``text``.

    When ``keyserver`` is falsy, python-gnupg's default server is used.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # Keyserver searching is not available in the python-gnupg >= 1.3.1
        # API line; fail loudly rather than return incomplete data.
        # (Message grammar fixed: "not support" -> "not supported".)
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)

        if expires:
            # Keyserver timestamps are epoch strings; render as ISO dates.
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    _keys = []
    for _key in _list_keys(user, gnupghome):
        # Always-present fields first; optional fields are added below
        # only when the backend reported them.
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        owner_trust = _key.get('ownertrust', None)
        trust = _key.get('trust', None)
        if expires:
            # Epoch string -> ISO date
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        if owner_trust:
            # Translate GnuPG single-letter trust codes to readable labels
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if trust:
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    _keys = []
    # Same shape as list_keys(), but over the secret keyring (secret=True).
    for _key in _list_keys(user, gnupghome, secret=True):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        owner_trust = _key.get('ownertrust', None)
        trust = _key.get('trust', None)
        if expires:
            # Epoch string -> ISO date
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        if owner_trust:
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if trust:
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    Returns a dict with ``res`` (bool), ``fingerprint`` and ``message`` keys.
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory generation parameters; optional ones are added only when set
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # Passphrase is deliberately sourced from pillar, never the CLI
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    # gen_key blocks until enough entropy is available (can take a while)
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive, but one is required
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # GnuPG refuses to delete a public key while its secret key exists
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        # NOTE(review): if this public-key deletion fails, 'res' still ends
        # up True (set unconditionally below) -- confirm intended.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    Returns the matching key dict, or ``False`` when no key matches.
    '''
    tmp = {}
    # Scan all secret keys; matches on fingerprint, long keyid, or the
    # short (last 8 chars) keyid. No break: if several keys match, the
    # last match in the keyring wins.
    for _key in _list_keys(user, gnupghome, secret=True):
        if (_key['fingerprint'] == fingerprint or
                _key['keyid'] == keyid or
                _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']
            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    if not tmp:
        return False
    else:
        return tmp
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'

        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }

    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                # Read the whole file as bytes. The previous
                # ''.join(_fp.readlines()) broke on Python 3 (bytes lines
                # joined with a str separator raises TypeError); python-gnupg
                # accepts bytes input to import_keys directly.
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # python-gnupg >= 1.3.1 exposes results via a 'counts' dict; older
    # releases expose the same figures as attributes.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept a comma-separated string as well as a list of IDs
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept a comma-separated string as well as a list of key IDs
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Each result entry reports either 'ok' ('1' = imported, '0' = already
    # present) or 'problem' for a failed fetch.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'

        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'

        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive; resolve keyid to a
    # fingerprint because --import-ownertrust wants fingerprints.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<numeric level>" to `gpg --import-ownertrust`
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The `salt` pseudo-user keeps its keyring in the config dir and
        # the command must run as root to access it
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        # gpg reports the old/new trust digits on stderr; parse them to
        # build a human-readable message
        if res['stderr']:
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): if stderr contains no digits at all,
                # _match[0] raises IndexError here -- confirm gpg always
                # emits a trust digit on this path.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there.  How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        # Passphrase is deliberately sourced from pillar, never the CLI
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg >= 1.3.1 renamed the signing-key kwarg to default_key
    gnupg_version = _LooseVersion(gnupg.__version__)

    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # BUGFIX: the >= 1.3.1 branch previously called
            # gpg.sign(text, ...) with text=None instead of signing the
            # opened file; both branches must use sign_file on the handle.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there.  How are you?'

        salt '*' gpg.verify filename='/path/to/important.file'

        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True

        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # logger.warn is deprecated in favor of logger.warning
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    # trust_level is None when the signature did not verify at all
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there.  How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a dict, unlike
        # create_key/sign which use it directly -- confirm the expected
        # pillar structure ({'gpg_passphrase': ...} vs a plain string).
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        # NOTE(review): the text branch does not forward ``sign`` to
        # gpg.encrypt, so text input is never signed -- confirm intended.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): pillar value is indexed as a dict here (same pattern
        # as encrypt); confirm the expected pillar structure.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
import_key
|
python
|
def import_key(text=None,
filename=None,
user=None,
gnupghome=None):
r'''
Import a key from text or file
text
The text containing to import.
filename
The filename containing the key to import.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
salt '*' gpg.import_key filename='/path/to/public-key-file'
'''
ret = {
'res': True,
'message': ''
}
gpg = _create_gpg(user, gnupghome)
if not text and not filename:
raise SaltInvocationError('filename or text must be passed.')
if filename:
try:
with salt.utils.files.flopen(filename, 'rb') as _fp:
lines = _fp.readlines()
text = ''.join(lines)
except IOError:
raise SaltInvocationError('filename does not exist.')
imported_data = gpg.import_keys(text)
if GPG_1_3_1:
counts = imported_data.counts
if counts.get('imported') or counts.get('imported_rsa'):
ret['message'] = 'Successfully imported key(s).'
elif counts.get('unchanged'):
ret['message'] = 'Key(s) already exist in keychain.'
elif counts.get('not_imported'):
ret['res'] = False
ret['message'] = 'Unable to import key.'
elif not counts.get('count'):
ret['res'] = False
ret['message'] = 'Unable to import key.'
else:
if imported_data.imported or imported_data.imported_rsa:
ret['message'] = 'Successfully imported key(s).'
elif imported_data.unchanged:
ret['message'] = 'Key(s) already exist in keychain.'
elif imported_data.not_imported:
ret['res'] = False
ret['message'] = 'Unable to import key.'
elif not imported_data.count:
ret['res'] = False
ret['message'] = 'Unable to import key.'
return ret
|
r'''
Import a key from text or file
text
The text containing to import.
filename
The filename containing the key to import.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
salt '*' gpg.import_key filename='/path/to/public-key-file'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L683-L755
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Maps GnuPG single-letter validity/trust codes (as reported by list_keys)
# to human-readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Maps the trust-level names accepted by trust_key() to the numeric codes
# understood by `gpg --import-ownertrust`.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT, used to render gpg's numeric trust output.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Maps python-gnupg's numeric verify trust_level to readable labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# Detect the python-gnupg bindings; the >= 1.3.1 API line (vinay.sajip
# fork) differs in several keyword names, so record which one is present.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Returns the path to the gpg binary
    '''
    # Get the path to the gpg binary.
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.

    Returns the module's virtual name when both the gpg binary and the
    python-gnupg bindings are present, otherwise a (False, reason) tuple
    so the loader can report why the module is unavailable.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    return __virtualname__ if HAS_GPG_BINDINGS \
        else (False, 'The gpg execution module cannot be loaded; the '
              'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Wrapper for user.info Salt function

    user
        Username to look up; when empty, defaults to the user the Salt
        process is running as (``config.option('user')``).

    Returns the ``user.info`` dict for the resolved user.

    Raises SaltInvocationError when the user does not exist, except for the
    special ``salt`` user which falls back to the running user's info.
    '''
    if not user:
        # Get user Salt runnining as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if not userinfo:
        if user == 'salt':
            # Special case with `salt` user:
            # if it doesn't exist then fall back to user Salt running as
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))
    return userinfo
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory path for a user.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; any other
    user gets ``.gnupg`` under their home directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    # Decorator for gpg functions that modify the GnuPG home directory:
    # temporarily chowns the keyring to the user running the Salt process,
    # invokes the wrapped function, then hands ownership back to the
    # requested user so their keyring stays usable.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (loader-injected __pub_* style arguments)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Restore ownership of the keyring to the target user
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` object bound to the appropriate keyring.

    ``gnupghome`` defaults to the per-user GnuPG home directory.  The
    keyword used to pass the home directory differs between python-gnupg
    releases, hence the version branch.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 takes ``homedir``; older releases take ``gnupghome``
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw python-gnupg key listing for the given keyring.
    Set ``secret=True`` to list secret keys instead of public ones.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text`` and return the raw
    python-gnupg result.  When ``keyserver`` is falsy the library's
    default keyserver is used.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # The search_keys API used below is unavailable on python-gnupg >= 1.3.1
        raise SaltInvocationError(
            'The search_keys function is not supported with this version '
            'of python-gnupg.'
        )

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)

        # expires/date come back as epoch-second strings; render YYYY-MM-DD
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List public keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_keys
    '''
    # Normalize the raw python-gnupg listing into plain dicts
    _keys = []
    for _key in _list_keys(user, gnupghome):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        owner_trust = _key.get('ownertrust', None)
        trust = _key.get('trust', None)
        # expires/date are epoch-second strings; render as YYYY-MM-DD
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        # ownertrust/trust are single-letter gpg codes; map to readable labels
        if owner_trust:
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if trust:
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_secret_keys
    '''
    # Same normalization as list_keys(), but over the secret keyring
    _keys = []
    for _key in _list_keys(user, gnupghome, secret=True):
        tmp = {'keyid': _key['keyid'],
               'fingerprint': _key['fingerprint'],
               'uids': _key['uids']}
        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)
        owner_trust = _key.get('ownertrust', None)
        trust = _key.get('trust', None)
        # expires/date are epoch-second strings; render as YYYY-MM-DD
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        # ownertrust/trust are single-letter gpg codes; map to readable labels
        if owner_trust:
            tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
        if trust:
            tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
        _keys.append(tmp)
    return _keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain
    .. note::
        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.
        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.
    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.
    key_length
        The length of the primary key in bits.
    name_real
        The real name of the user identity which is represented by the key.
    name_comment
        A comment to attach to the user id.
    name_email
        An email address for the user.
    subkey_type
        The type of the secondary key to generate.
    subkey_length
        The length of the secondary key in bits.
    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Parameters always passed to gen_key_input(); the optional ones
    # below are only added when the caller supplied them.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is only ever sourced from pillar, never the CLI
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # A fingerprint on the result object signals successful generation
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain
    keyid
        The keyid of the key to be deleted.
    fingerprint
        The fingerprint of the key to be deleted.
    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # fingerprint and keyid are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # gpg refuses to delete a public key while its secret key exists,
        # so require explicit opt-in before removing the secret key first.
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key (delete_keys stringifies to 'ok' on success)
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    # Match on full fingerprint, full keyid, or the short key ID
    # (keyid[8:] -- presumably the trailing 8 chars of a 16-char long
    # ID; confirm against python-gnupg's listing format).
    for _key in _list_keys(user, gnupghome):
        if (_key['fingerprint'] == fingerprint or
            _key['keyid'] == keyid or
            _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']
            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)
            # expires/date are epoch-second strings; render as YYYY-MM-DD
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Return False (not None) when no key matched, per this module's API
    if not tmp:
        return False
    else:
        return tmp
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    tmp = {}
    # Same matching rules as get_key(), but scans the secret keyring
    for _key in _list_keys(user, gnupghome, secret=True):
        if (_key['fingerprint'] == fingerprint or
            _key['keyid'] == keyid or
            _key['keyid'][8:] == keyid):
            tmp['keyid'] = _key['keyid']
            tmp['fingerprint'] = _key['fingerprint']
            tmp['uids'] = _key['uids']
            expires = _key.get('expires', None)
            date = _key.get('date', None)
            length = _key.get('length', None)
            owner_trust = _key.get('ownertrust', None)
            trust = _key.get('trust', None)
            # expires/date are epoch-second strings; render as YYYY-MM-DD
            if expires:
                tmp['expires'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['expires'])))
            if date:
                tmp['created'] = time.strftime('%Y-%m-%d',
                                               time.localtime(float(_key['date'])))
            if length:
                tmp['keyLength'] = _key['length']
            if owner_trust:
                tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]
            if trust:
                tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]
    # Return False (not None) when no key matched, per this module's API
    if not tmp:
        return False
    else:
        return tmp
@_restore_ownership
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept either a list or a comma separated string of key ids
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept either a list or a comma separated string of key ids
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Each per-key result carries 'ok' ('1' = imported, '0' = already
    # present) or 'problem' on failure
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }

    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    # fingerprint and keyid are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the keychain
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # gpg reads "<fingerprint>:<numeric trust>" records on stdin
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user

    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if res['retcode'] != 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    elif res['stderr']:
        # gpg reports the old/new trust values as digits in its stderr chatter
        _match = re.findall(r'\d', res['stderr'])
        if len(_match) == 2:
            ret['fingerprint'] = fingerprint
            ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                INV_NUM_TRUST_DICT[_match[0]],
                INV_NUM_TRUST_DICT[_match[1]]
            )
        elif len(_match) == 1:
            ret['fingerprint'] = fingerprint
            ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            # No recognizable trust digits in the output; return stderr
            # verbatim instead of raising IndexError on _match[0].
            ret['fingerprint'] = fingerprint
            ret['message'] = res['stderr']
    else:
        ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None

    # python-gnupg >= 1.3.1 renamed the ``keyid`` keyword to ``default_key``
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= _LooseVersion('1.3.1'):
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= _LooseVersion('1.3.1'):
                # Sign the open file stream; the original code passed the
                # (unset) ``text`` argument here instead of the file.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    # Honor the documented gnupghome argument (it was previously ignored)
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # Logger.warn is a deprecated alias of Logger.warning
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    # A populated trust_level means the signature checked out
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints for those recipient whom the data is being encrypted for.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): indexes into the pillar value, i.e. expects pillar to
        # return {'gpg_passphrase': ...}; sign() uses the value directly --
        # confirm which pillar shape is actually intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This python-gnupg version cannot encrypt from a file stream;
            # read the contents into memory and encrypt those instead.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: hand back only the raw encrypted payload
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): indexes into the pillar value, i.e. expects pillar to
        # return {'gpg_passphrase': ...}; sign() uses the value directly --
        # confirm which pillar shape is actually intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                # Write the decrypted payload straight to ``output``
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: hand back only the raw decrypted payload
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
export_key
|
python
|
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
'''
Export a key from the GPG keychain
keyids
The key ID(s) of the key(s) to be exported. Can be specified as a comma
separated string or a list. Anything which GnuPG itself accepts to
identify a key - for example, the key ID or the fingerprint could be
used.
secret
Export the secret key identified by the ``keyids`` information passed.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.export_key keyids=3FAD9F1E
salt '*' gpg.export_key keyids=3FAD9F1E secret=True
salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
'''
gpg = _create_gpg(user, gnupghome)
if isinstance(keyids, six.string_types):
keyids = keyids.split(',')
return gpg.export_keys(keyids, secret)
|
Export a key from the GPG keychain
keyids
The key ID(s) of the key(s) to be exported. Can be specified as a comma
separated string or a list. Anything which GnuPG itself accepts to
identify a key - for example, the key ID or the fingerprint could be
used.
secret
Export the secret key identified by the ``keyids`` information passed.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.export_key keyids=3FAD9F1E
salt '*' gpg.export_key keyids=3FAD9F1E secret=True
salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L758-L794
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map gpg's single-letter trust codes (as seen in key listings) to
# human readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the CLI trust-level names accepted by trust_key() to the numeric
# ownertrust values understood by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT; used when reporting trust changes back to
# the caller.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Labels for the numeric trust levels on python-gnupg verification results.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg is >= 1.3.1, whose API differs
# from older releases (e.g. ``homedir`` vs ``gnupghome`` keyword).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the ``gpg`` binary, or a falsy value when it
    is not found on the PATH.
    '''
    # Get the path to the gpg binary.
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.
    Returns the module's virtual name on success, otherwise a
    ``(False, reason)`` tuple explaining why loading failed.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    return __virtualname__ if HAS_GPG_BINDINGS \
        else (False, 'The gpg execution module cannot be loaded; the '
              'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Wrapper for the ``user.info`` Salt function.
    Defaults to the user the Salt process runs as; the special ``salt``
    user falls back to that account when it does not exist on the system.
    '''
    if not user:
        # Get user Salt is running as
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if not userinfo:
        if user == 'salt':
            # Special case with `salt` user:
            # if it doesn't exist then fall back to user Salt running as
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))
    return userinfo
def _get_user_gnupghome(user):
    '''
    Return default GnuPG home directory path for a user.
    The special ``salt`` user keeps keys under ``<config_dir>/gpgkeys``;
    everyone else gets ``~/.gnupg``.
    '''
    if user == 'salt':
        gnupghome = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    else:
        gnupghome = os.path.join(_get_user_info(user)['home'], '.gnupg')
    return gnupghome
def _restore_ownership(func):
    '''
    Decorator: temporarily give the Salt process ownership of the target
    user's GnuPG home directory, run ``func``, then hand ownership back
    to that user.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (Salt loader injects __pub_* style keys
        # which the wrapped function does not accept)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Restore ownership of the GnuPG home dir to the target user
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Create the ``gnupg.GPG`` object bound to the appropriate keyring.
    ``gnupghome`` defaults to the per-user GnuPG home directory.
    '''
    if not gnupghome:
        gnupghome = _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 takes ``homedir``; older releases take ``gnupghome``
    if GPG_1_3_1:
        gpg = gnupg.GPG(homedir=gnupghome)
    else:
        gpg = gnupg.GPG(gnupghome=gnupghome)
    return gpg
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Helper returning the raw python-gnupg key listing for the given
    keyring; ``secret=True`` lists secret keys instead of public ones.
    '''
    gpg = _create_gpg(user, gnupghome)
    _keys = gpg.list_keys(secret)
    return _keys
def _search_keys(text, keyserver, user=None):
    '''
    Helper querying a keyserver for keys matching ``text``; when
    ``keyserver`` is falsy the library default keyserver is used.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        _keys = gpg.search_keys(text, keyserver)
    else:
        _keys = gpg.search_keys(text)
    return _keys
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # The python-gnupg >= 1.3.1 fork does not implement search_keys().
        # Bug fix: message previously read "is not support with".
        raise SaltInvocationError(
            'The search_keys function is not supported with this version of python-gnupg.'
        )
    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        # Keyserver timestamps are epoch seconds; render as YYYY-MM-DD.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    ret = []
    for raw in _list_keys(user, gnupghome):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Timestamps come back as epoch seconds; render as YYYY-MM-DD.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Single-letter trust codes are mapped to readable labels.
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        ret.append(entry)
    return ret
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    ret = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Timestamps come back as epoch seconds; render as YYYY-MM-DD.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Single-letter trust codes are mapped to readable labels.
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        ret.append(entry)
    return ret
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory generation parameters; optional ones are added below only
    # when supplied so gen_key_input() can apply its own defaults.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is never accepted on the CLI; it must come from
        # the `gpg_passphrase` pillar key.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    # Render the batch key-generation input and run it; gen_key() blocks
    # until GnuPG has gathered enough entropy.
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    # Bug fix: look the key up in the same keyring we are deleting from.
    # The original dropped `gnupghome` here and could consult the wrong
    # (default) keyring.
    key = get_key(keyid, fingerprint, user, gnupghome)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user, gnupghome)
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on full fingerprint, long keyid, or short (last 8) keyid.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on full fingerprint, long keyid, or short (last 8) keyid.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                # Bug fix: read the raw bytes in one shot.  The original
                # joined readlines() with a str separator, which raises
                # TypeError on Python 3 (bytes lines, str joiner).
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    if GPG_1_3_1:
        # The python-gnupg >= 1.3.1 fork reports results via a counts
        # dict rather than attributes.
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept a comma-separated string as well as a list of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    for outcome in recv_data.results:
        if 'ok' in outcome:
            # 'ok' == '1' means newly imported; '0' means already present.
            if outcome['ok'] == '1':
                ret['message'].append(
                    'Key {0} added to keychain'.format(outcome['fingerprint']))
            elif outcome['ok'] == '0':
                ret['message'].append(
                    'Key {0} already exists in keychain'.format(outcome['fingerprint']))
        elif 'problem' in outcome:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a full fingerprint; ownertrust records
            # are keyed by fingerprint.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<numeric level>" to `gpg --import-ownertrust`,
    # the same format that `gpg --export-ownertrust` emits.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The `salt` pseudo-user keyring lives under the config dir and is
        # accessed as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports trust changes on stderr; pull out the numeric
            # before/after codes.  NOTE(review): if stderr contains no
            # digits, _match[0] below raises IndexError -- presumably gpg
            # always prints at least one code here; confirm.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg >= 1.3.1 renamed the signing-key keyword from `keyid`
    # to `default_key`.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Bug fix: the original signed `text` (None in this branch)
                # instead of the file contents under gnupg >= 1.3.1.
                signed_data = gpg.sign(_fp.read(), default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # Bug fix: signed_data.data is bytes; write the output file in
            # binary mode so this also works on Python 3.
            with salt.utils.files.flopen(output, 'wb') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # Bug fix: Logger.warn is a deprecated alias; use warning().
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints of the recipients the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # Bug fix: pillar.get already returns the passphrase value.  The
        # original re-indexed it with ['gpg_passphrase'], which raises
        # TypeError for the documented string-valued pillar key and is
        # inconsistent with sign().
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # Bug fix: pillar.get already returns the passphrase value.  The
        # original re-indexed it with ['gpg_passphrase'], which raises
        # TypeError for the documented string-valued pillar key and is
        # inconsistent with sign().
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
receive_keys
|
python
|
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
'''
Receive key(s) from keyserver and add them to keychain
keyserver
Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
keys
The keyID(s) to retrieve from the keyserver. Can be specified as a comma
separated string or a list.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.receive_keys keys='3FAD9F1E'
salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
salt '*' gpg.receive_keys keys=3FAD9F1E user=username
'''
ret = {
'res': True,
'changes': {},
'message': []
}
gpg = _create_gpg(user, gnupghome)
if not keyserver:
keyserver = 'pgp.mit.edu'
if isinstance(keys, six.string_types):
keys = keys.split(',')
recv_data = gpg.recv_keys(keyserver, *keys)
for result in recv_data.results:
if 'ok' in result:
if result['ok'] == '1':
ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
elif result['ok'] == '0':
ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
elif 'problem' in result:
ret['message'].append('Unable to add key to keychain')
return ret
|
Receive key(s) from keyserver and add them to keychain
keyserver
Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
keys
The keyID(s) to retrieve from the keyserver. Can be specified as a comma
separated string or a list.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.receive_keys keys='3FAD9F1E'
salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
salt '*' gpg.receive_keys keys=3FAD9F1E user=username
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L798-L851
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG single-letter trust codes (as seen in key listings) to
# human-readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by trust_key() to the numeric codes
# understood by `gpg --import-ownertrust`.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT, used to render gpg's numeric codes back into
# readable messages.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Trust levels reported by signature verification (gnupg's trust_level).
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg is >= 1.3.1 (the fork with a
# different constructor/keyword API).
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg binary found on the PATH,
    or None when gpg is not installed.
    '''
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.
    '''
    # Both the gpg binary and the python bindings must be present.
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Look up account information via the ``user.info`` execution module.

    When *user* is not given, the account Salt itself runs as is used.
    The pseudo-user ``salt`` falls back to the running account when no
    real ``salt`` system user exists.
    '''
    if not user:
        user = __salt__['config.option']('user')
    info = __salt__['user.info'](user)
    if info:
        return info
    if user == 'salt':
        # No real `salt` account; recurse to resolve the process owner.
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
return userinfo
def _get_user_gnupghome(user):
'''
Return default GnuPG home directory path for a user
'''
if user == 'salt':
gnupghome = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
else:
gnupghome = os.path.join(_get_user_info(user)['home'], '.gnupg')
return gnupghome
def _restore_ownership(func):
    '''
    Decorator: temporarily re-own the target user's GnuPG home directory
    to the account running the Salt process, call the wrapped gpg
    function, then hand ownership back to the requested user.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        # Resolve the keyring directory the wrapped call will operate on.
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (double-underscore keys injected by Salt).
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Restore ownership of the keyring to the requested user.
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` object bound to the appropriate keyring directory.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the constructor keyword to `homedir`.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key listing from python-gnupg.

    Pass ``secret=True`` to list secret keys instead of public ones.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text``.

    When ``keyserver`` is falsy, python-gnupg's default server is used.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    # The 1.3.1+ fork of python-gnupg does not offer keyserver search
    if GPG_1_3_1:
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    results = []
    for raw in _search_keys(text, keyserver, user):
        entry = {'keyid': raw['keyid'],
                 'uids': raw['uids']}
        # Optional fields: only present when the keyserver reports them
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        results.append(entry)
    return results
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    keys = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional attributes: copy them over only when present and truthy
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    keys = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        # Optional attributes: copy them over only when present and truthy
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine.  Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory gen_key_input() parameters; optional ones are added below
    # only when supplied, so python-gnupg can apply its own defaults.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is deliberately read from pillar rather than taken
        # as an argument, to keep it off the command line / process list.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    # Render the batch key-input block and generate the pair (slow; needs
    # entropy -- see the note in the docstring).
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # Refuse to delete the public key while a secret key still exists,
        # unless the caller explicitly asked for the secret to go first.
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        # NOTE(review): if the secret-key deletion above fails (gpg does not
        # answer 'ok'), execution still falls through and attempts the public
        # key deletion -- presumably unintended; confirm before relying on it.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on full fingerprint, full keyid, or the short (last 8) keyid.
        # Note: no break -- if several keys match, later matches overwrite
        # earlier fields in the single result dict.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Return False (not an empty dict) when nothing matched
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on full fingerprint, full keyid, or the short (last 8) keyid.
        # Note: no break -- if several keys match, later matches overwrite
        # earlier fields in the single result dict.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Return False (not an empty dict) when nothing matched
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        # When a filename is given its contents replace any `text` argument
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The two python-gnupg lineages expose import results differently:
    # the 1.3.1+ fork aggregates them in a ``counts`` dict, older releases
    # expose individual attributes on the result object.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    # Normalize a comma-separated string into a list of key IDs
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    return _create_gpg(user, gnupghome).export_keys(keyids, secret)
@_restore_ownership
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a full fingerprint via the keychain
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<numeric level>\n" to `gpg --import-ownertrust`
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The `salt` user keyring lives under the config dir and the command
        # must run as root to reach it
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new ownertrust values on stderr; extract
            # the digits to build a human-readable message.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                # NOTE(review): assumes at least one digit was matched;
                # _match[0] raises IndexError on digit-free stderr -- confirm.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True

    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg >= 1.3.1 renamed the ``keyid`` keyword to ``default_key``
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # BUGFIX: this branch previously called ``gpg.sign(text, ...)``
            # with ``text=None`` on new python-gnupg, signing nothing instead
            # of the file. Sign the opened file stream in both cases.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # Also persist the signed output when an output path was given
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias; use log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a dict, unlike
        # create_key/sign which use the value directly -- presumably expects
        # pillar data shaped ``gpg_passphrase: {gpg_passphrase: ...}``; confirm.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        # Full failure details only go to the minion log
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a dict, unlike
        # create_key/sign which use the value directly -- presumably expects
        # pillar data shaped ``gpg_passphrase: {gpg_passphrase: ...}``; confirm.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        # Full failure details only go to the minion log
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
trust_key
|
python
|
def trust_key(keyid=None,
fingerprint=None,
trust_level=None,
user=None):
'''
Set the trust level for a key in GPG keychain
keyid
The keyid of the key to set the trust level for.
fingerprint
The fingerprint of the key to set the trust level for.
trust_level
The trust level to set for the specified key, must be one
of the following:
expired, unknown, not_trusted, marginally, fully, ultimately
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
CLI Example:
.. code-block:: bash
salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
'''
ret = {
'res': True,
'message': ''
}
_VALID_TRUST_LEVELS = ['expired', 'unknown',
'not_trusted', 'marginally',
'fully', 'ultimately']
if fingerprint and keyid:
ret['res'] = False
ret['message'] = 'Only specify one argument, fingerprint or keyid'
return ret
if not fingerprint:
if keyid:
key = get_key(keyid, user=user)
if key:
if 'fingerprint' not in key:
ret['res'] = False
ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
return ret
fingerprint = key['fingerprint']
else:
ret['res'] = False
ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
return ret
else:
ret['res'] = False
ret['message'] = 'Required argument, fingerprint or keyid'
return ret
if trust_level not in _VALID_TRUST_LEVELS:
return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
cmd = [_gpg(), '--import-ownertrust']
_user = user
if user == 'salt':
homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
cmd.extend(['--homedir', homeDir])
_user = 'root'
res = __salt__['cmd.run_all'](cmd,
stdin=stdin,
runas=_user,
python_shell=False)
if not res['retcode'] == 0:
ret['res'] = False
ret['message'] = res['stderr']
else:
if res['stderr']:
_match = re.findall(r'\d', res['stderr'])
if len(_match) == 2:
ret['fingerprint'] = fingerprint
ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
INV_NUM_TRUST_DICT[_match[0]],
INV_NUM_TRUST_DICT[_match[1]]
)
else:
ret['fingerprint'] = fingerprint
ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
else:
ret['message'] = res['stderr']
return ret
|
Set the trust level for a key in GPG keychain
keyid
The keyid of the key to set the trust level for.
fingerprint
The fingerprint of the key to set the trust level for.
trust_level
The trust level to set for the specified key, must be one
of the following:
expired, unknown, not_trusted, marginally, fully, ultimately
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
CLI Example:
.. code-block:: bash
salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L854-L951
|
[
"def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):\n '''\n Get a key from the GPG keychain\n\n keyid\n The key ID (short or long) of the key to be retrieved.\n\n fingerprint\n The fingerprint of the key to be retrieved.\n\n user\n Which user's keychain to access, defaults to user Salt is running as.\n Passing the user as ``salt`` will set the GnuPG home directory to the\n ``/etc/salt/gpgkeys``.\n\n gnupghome\n Specify the location where GPG keyring and related files are stored.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' gpg.get_key keyid=3FAD9F1E\n\n salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192\n\n salt '*' gpg.get_key keyid=3FAD9F1E user=username\n\n '''\n tmp = {}\n for _key in _list_keys(user, gnupghome):\n if (_key['fingerprint'] == fingerprint or\n _key['keyid'] == keyid or\n _key['keyid'][8:] == keyid):\n tmp['keyid'] = _key['keyid']\n tmp['fingerprint'] = _key['fingerprint']\n tmp['uids'] = _key['uids']\n\n expires = _key.get('expires', None)\n date = _key.get('date', None)\n length = _key.get('length', None)\n owner_trust = _key.get('ownertrust', None)\n trust = _key.get('trust', None)\n\n if expires:\n tmp['expires'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['expires'])))\n if date:\n tmp['created'] = time.strftime('%Y-%m-%d',\n time.localtime(float(_key['date'])))\n if length:\n tmp['keyLength'] = _key['length']\n if owner_trust:\n tmp['ownerTrust'] = LETTER_TRUST_DICT[_key['ownertrust']]\n if trust:\n tmp['trust'] = LETTER_TRUST_DICT[_key['trust']]\n if not tmp:\n return False\n else:\n return tmp\n",
"def _gpg():\n '''\n Returns the path to the gpg binary\n '''\n # Get the path to the gpg binary.\n return salt.utils.path.which('gpg')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG's single-letter trust codes (as they appear in key listings)
# to human readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by this module's API to the numeric
# ownertrust values GnuPG expects.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT: numeric ownertrust value -> display label.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Map python-gnupg verify() trust_level integers (stringified) to labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg is the >= 1.3.1 fork, whose API
# (keyword names, counts attribute, missing search_keys) differs from the
# classic python-gnupg API.  Several functions below branch on this flag.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the ``gpg`` binary, or ``None`` when the
    binary cannot be found on the search path.
    '''
    gpg_path = salt.utils.path.which('gpg')
    return gpg_path
def __virtual__():
    '''
    Only load this module when both the ``gpg`` binary and the
    python-gnupg bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Look up account information for ``user`` via the ``user.info``
    execution function.

    When ``user`` is not given, the account the Salt process runs as is
    used.  The special ``salt`` user falls back to the running account
    when no such system account exists; any other unknown user raises
    ``SaltInvocationError``.
    '''
    if not user:
        # Default to the account the Salt process is running under
        user = __salt__['config.option']('user')

    info = __salt__['user.info'](user)

    if not info:
        if user == 'salt':
            # The dedicated ``salt`` account is optional; fall back to
            # the account Salt is actually running as.
            info = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))

    return info
def _get_user_gnupghome(user):
    '''
    Compute the default GnuPG home directory for ``user``.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; every
    other user gets ``~user/.gnupg``.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: run ``func`` as the Salt process account, then restore
    ownership of the target user's GnuPG home directory afterwards.

    GnuPG operations happen as the account the Salt process runs as;
    when a different ``user`` keychain is targeted, the files under that
    user's GnuPG home are temporarily chowned to the running account and
    chowned back once ``func`` returns.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Hand ownership of the GnuPG home back to the target user
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance bound to the appropriate home
    directory.

    The keyword that points at the home directory differs between the
    classic python-gnupg API and the >= 1.3.1 fork.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key list from the user's GPG keychain.

    Pass ``secret=True`` to list secret keys instead of public ones.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query ``keyserver`` for keys matching ``text`` using the keychain of
    ``user``.  When ``keyserver`` is falsy, python-gnupg's default
    keyserver is used.
    '''
    gpg = _create_gpg(user)
    if not keyserver:
        return gpg.search_keys(text)
    return gpg.search_keys(text, keyserver)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    if GPG_1_3_1:
        # The >= 1.3.1 fork of python-gnupg does not provide search_keys.
        # (Message fixed: previously read "is not support with".)
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)

        # Timestamps come back as epoch strings; render them as ISO dates
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    output = []
    for raw in _list_keys(user, gnupghome):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as ISO dates; trust codes are
        # translated to human readable labels.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        output.append(entry)
    return output
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    output = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as ISO dates; trust codes are
        # translated to human readable labels.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        output.append(entry)
    return output
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::
        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    # Result dict returned to the caller; 'fingerprint' is filled in on
    # success.
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Mandatory generation parameters; optional ones are added below
    # only when supplied, so gen_key_input() sees no empty values.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # Passphrase is pulled from pillar so it never appears on the CLI
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # A missing fingerprint means generation failed (e.g. not enough
    # entropy or invalid parameters).
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }

    # keyid and fingerprint are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret

    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if not key:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret

    fingerprint = key['fingerprint']
    skey = get_secret_key(keyid, fingerprint, user)
    if skey:
        if not delete_secret:
            # GnuPG refuses to delete a public key while the secret key
            # still exists, so require an explicit opt-in.
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        if six.text_type(gpg.delete_keys(fingerprint, True)) != 'ok':
            # Previously a failed secret-key deletion fell through and
            # reported success with an empty message.
            ret['res'] = False
            ret['message'] = 'Failed to delete secret key for {0}'.format(fingerprint)
            return ret
        ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)

    # Delete the public key.  Previously this only happened when a secret
    # key had just been deleted; a public key with no secret counterpart
    # was silently left in place while 'res' stayed True.
    if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
        ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
    else:
        ret['res'] = False
        ret['message'] += 'Failed to delete public key for {0}'.format(fingerprint)
    return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # Match on fingerprint, long key ID, or short (last 8 chars) ID.
        if not (candidate['fingerprint'] == fingerprint or
                candidate['keyid'] == keyid or
                candidate['keyid'][8:] == keyid):
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Returns False (not an empty dict) when nothing matched.
    if found:
        return found
    return False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, long key ID, or short (last 8 chars) ID.
        if not (candidate['fingerprint'] == fingerprint or
                candidate['keyid'] == keyid or
                candidate['keyid'][8:] == keyid):
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    # Returns False (not an empty dict) when nothing matched.
    if found:
        return found
    return False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            # NOTE(review): file is opened in binary mode but joined with a
            # str separator -- assumes flopen yields text-compatible lines
            # on this platform; confirm on Python 3.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The two supported python-gnupg APIs expose import results
    # differently: the >= 1.3.1 fork uses a ``counts`` dict, the classic
    # API uses attributes on the result object.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    gpg = _create_gpg(user, gnupghome)
    # Accept either a comma separated string or a list of key IDs.
    key_list = keyids.split(',') if isinstance(keyids, six.string_types) else keyids
    return gpg.export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept either a comma separated string or a list of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # GnuPG reports '1' for newly imported keys and '0' for keys that
    # were already present.
    status_messages = {
        '1': 'Key {0} added to keychain',
        '0': 'Key {0} already exists in keychain',
    }
    for result in recv_data.results:
        if 'ok' in result:
            template = status_messages.get(result['ok'])
            if template:
                ret['message'].append(template.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to
        first key in the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True

    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # The >= 1.3.1 fork of python-gnupg renamed the ``keyid`` keyword to
    # ``default_key``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Bug fix: this branch previously called gpg.sign(text, ...)
                # but ``text`` is None here, so the file was never signed.
                # Sign the file stream instead.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'

        salt '*' gpg.verify filename='/path/to/important.file'

        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    # Bug fix: gnupghome was previously accepted but never passed on, so
    # a custom keyring location was silently ignored.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias for log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    # trust_level is None when the signature did not verify at all.
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): assumes the pillar value is a dict keyed by
        # 'gpg_passphrase'; sign() treats the same pillar value as a plain
        # string -- confirm which shape the pillar actually uses.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    # With bare=True the caller gets the raw armored data (or False on
    # failure) instead of the res/comment dict.
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): assumes the pillar value is a dict keyed by
        # 'gpg_passphrase'; sign() treats the same pillar value as a plain
        # string -- confirm which shape the pillar actually uses.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    # With bare=True the caller gets the raw decrypted data (or False on
    # failure) instead of the res/comment dict.
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
sign
|
python
|
def sign(user=None,
keyid=None,
text=None,
filename=None,
output=None,
use_passphrase=False,
gnupghome=None):
'''
Sign message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
keyid
The keyid of the key to set the trust level for, defaults to
first key in the secret keyring.
text
The text to sign.
filename
The filename to sign.
output
The filename where the signed file will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.sign text='Hello there. How are you?'
salt '*' gpg.sign filename='/path/to/important.file'
salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
'''
gpg = _create_gpg(user, gnupghome)
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
raise SaltInvocationError('gpg_passphrase not available in pillar.')
else:
gpg_passphrase = None
# Check for at least one secret key to sign with
gnupg_version = _LooseVersion(gnupg.__version__)
if text:
if gnupg_version >= '1.3.1':
signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
else:
signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
elif filename:
with salt.utils.files.flopen(filename, 'rb') as _fp:
if gnupg_version >= '1.3.1':
signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
else:
signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
if output:
with salt.utils.files.flopen(output, 'w') as fout:
fout.write(signed_data.data)
else:
raise SaltInvocationError('filename or text must be passed.')
return signed_data.data
|
Sign message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
keyid
The keyid of the key to set the trust level for, defaults to
first key in the secret keyring.
text
The text to sign.
filename
The filename to sign.
output
The filename where the signed file will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
CLI Example:
.. code-block:: bash
salt '*' gpg.sign text='Hello there. How are you?'
salt '*' gpg.sign filename='/path/to/important.file'
salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L954-L1027
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG's single-letter trust codes (as they appear in key listings)
# to human readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by this module's API to the numeric
# ownertrust values GnuPG expects.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT: numeric ownertrust value -> display label.
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Map python-gnupg verify() trust_level integers (stringified) to labels.
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# True when the installed python-gnupg is the >= 1.3.1 fork, whose API
# differs from the classic python-gnupg API.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Returns the path to the gpg binary, or ``None`` when the binary is
    not found on the search path.
    '''
    # Get the path to the gpg binary.
    return salt.utils.path.which('gpg')
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.

    Returns the virtual module name when both are present, otherwise a
    ``(False, reason)`` tuple so the loader can report why loading failed.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    return __virtualname__ if HAS_GPG_BINDINGS \
        else (False, 'The gpg execution module cannot be loaded; the '
              'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Look up account information via the ``user.info`` execution function.

    When ``user`` is not supplied, the account the Salt process runs as is
    used.  The special ``salt`` user falls back to the running account if
    it does not exist; any other unknown user raises
    ``SaltInvocationError``.
    '''
    if not user:
        # Default to the account the Salt process is running as
        user = __salt__['config.option']('user')

    userinfo = __salt__['user.info'](user)

    if userinfo:
        return userinfo

    if user != 'salt':
        raise SaltInvocationError('User {0} does not exist'.format(user))

    # Special case for the `salt` user: if it doesn't exist, fall back
    # to the account Salt is running as.
    return _get_user_info()
def _get_user_gnupghome(user):
    '''
    Compute the default GnuPG home directory for ``user``.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; every
    other user gets ``~/.gnupg`` under their home directory.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator for gpg functions that must run as the Salt process user.

    Before calling ``func``, if the target ``user`` (from kwargs) differs
    from the account Salt runs as, the GnuPG home directory is chown'd to
    the running account so python-gnupg can read/write it; after the call
    the ownership is handed back to the target user.  The wrapped function
    is expected to accept ``user`` and ``gnupghome`` keyword arguments.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            # chown the home dir and everything beneath it to the running user
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs
        # (double-underscore kwargs are injected by the Salt loader and
        # must not be forwarded to the wrapped function)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Hand ownership of the GnuPG home back to the target user
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance rooted at the appropriate home directory.

    Falls back to the user's default GnuPG home when ``gnupghome`` is not
    given.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the home-directory keyword argument
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key records from the keychain (secret keys when
    ``secret`` is True).
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver (or python-gnupg's default when ``keyserver`` is
    falsy) for keys matching ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or
        fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to
        pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running
        as.  Passing the user as ``salt`` will set the GnuPG home
        directory to the ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username

    '''
    if GPG_1_3_1:
        # python-gnupg 1.x changed the search API this code relies on
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')

    if not keyserver:
        keyserver = 'pgp.mit.edu'

    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}

        expires = _key.get('expires', None)
        date = _key.get('date', None)
        length = _key.get('length', None)

        # Timestamps come back as epoch strings; present them as dates
        if expires:
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if date:
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if length:
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys

    '''
    keys = []
    for raw in _list_keys(user, gnupghome):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as YYYY-MM-DD dates
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Single-letter trust codes are expanded to readable labels
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys

    '''
    keys = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as YYYY-MM-DD dates
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        # Single-letter trust codes are expanded to readable labels
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine.  Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key

    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }

    # Mandatory batch-generation parameters; optional ones are appended
    # below only when supplied, since python-gnupg rejects empty values.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # The passphrase is deliberately sourced from pillar so it never
        # appears on the command line or in the job cache arguments.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    # Render the params into GnuPG batch-input format and generate the key
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # A fingerprint is only present when generation succeeded
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True

    '''
    ret = {
        'res': True,
        'message': ''
    }

    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret

    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # GnuPG refuses to delete a public key while a matching secret
        # key exists, so require explicit opt-in before removing it.
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)

        # Delete the public key
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome):
        # A key matches on its fingerprint, its full (long) keyid, or the
        # short keyid (last 8 hex digits of the long keyid).
        matched = (candidate['fingerprint'] == fingerprint or
                   candidate['keyid'] == keyid or
                   candidate['keyid'][8:] == keyid)
        if not matched:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    if not found:
        return False
    return found
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username

    '''
    found = {}
    for candidate in _list_keys(user, gnupghome, secret=True):
        # A key matches on its fingerprint, its full (long) keyid, or the
        # short keyid (last 8 hex digits of the long keyid).
        matched = (candidate['fingerprint'] == fingerprint or
                   candidate['keyid'] == keyid or
                   candidate['keyid'][8:] == keyid)
        if not matched:
            continue
        found['keyid'] = candidate['keyid']
        found['fingerprint'] = candidate['fingerprint']
        found['uids'] = candidate['uids']
        if candidate.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['expires'])))
        if candidate.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(candidate['date'])))
        if candidate.get('length'):
            found['keyLength'] = candidate['length']
        if candidate.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[candidate['ownertrust']]
        if candidate.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[candidate['trust']]
    if not found:
        return False
    return found
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    gpg = _create_gpg(user, gnupghome)

    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')

    if filename:
        try:
            # NOTE(review): the file is opened in binary mode, so
            # readlines() yields bytes; ''.join(lines) mixes str and
            # bytes on Python 3 — confirm whether a decode is needed here
            # or whether this path is only exercised on Python 2.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')

    imported_data = gpg.import_keys(text)

    # python-gnupg >= 1.3.1 reports import statistics through a `counts`
    # dict; older releases expose them as attributes on the result.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain

    keyids
        The key ID(s) of the key(s) to be exported.  Can be specified as a
        comma separated string or a list.  Anything which GnuPG itself
        accepts to identify a key — for example, the key ID or the
        fingerprint — could be used.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username

    '''
    # Accept both a comma separated string and a list of key ids
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu

    keys
        The keyID(s) to retrieve from the keyserver.  Can be specified as a
        comma separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username

    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }

    gpg = _create_gpg(user, gnupghome)

    if not keyserver:
        keyserver = 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')

    recv_data = gpg.recv_keys(keyserver, *keys)
    for result in recv_data.results:
        # 'ok' == '1' means imported, '0' means already present; records
        # without an 'ok' field carry a 'problem' entry on failure.
        status = result.get('ok')
        if status == '1':
            ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
        elif status == '0':
            ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif status is None and 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keyid=3FAD9F1E trust_level='ultimately' user='username'

    '''
    ret = {
        'res': True,
        'message': ''
    }

    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    # keyid and fingerprint are mutually exclusive selectors
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # gpg --import-ownertrust reads "fingerprint:level" records on stdin
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user

    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if res['retcode'] != 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # gpg reports the old/new numeric trust values on stderr;
            # two digits mean a change, one digit means an initial set.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            elif _match:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
            else:
                # No digits in stderr at all: previously this raised an
                # IndexError; report the raw output instead.
                ret['fingerprint'] = fingerprint
                ret['message'] = res['stderr']
        else:
            ret['message'] = res['stderr']
    return ret
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct

    '''
    # Pass gnupghome through so a custom keyring location is honored
    # (previously the argument was accepted but silently ignored).
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')

    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is deprecated; log.warning is the supported spelling
        log.warning(msg)
        return {'res': False, 'message': msg}

    extra_args = []

    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])

    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    ret = {}
    # trust_level is None when the signature did not verify at all
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the signed file will be written, default is standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there. How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # pillar.get may return either the passphrase itself or a dict
        # wrapping it under 'gpg_passphrase'; accept both (the previous
        # unconditional indexing raised TypeError on a plain string,
        # and create_key already uses the value directly).
        if isinstance(gpg_passphrase, dict):
            gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True

    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)

    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # pillar.get may return either the passphrase itself or a dict
        # wrapping it under 'gpg_passphrase'; accept both (the previous
        # unconditional indexing raised TypeError on a plain string,
        # and create_key already uses the value directly).
        if isinstance(gpg_passphrase, dict):
            gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None

    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')

    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
verify
|
python
|
def verify(text=None,
user=None,
filename=None,
gnupghome=None,
signature=None,
trustmodel=None):
'''
Verify a message or file
text
The text to verify.
filename
The filename to verify.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
signature
Specify the filename of a detached signature.
.. versionadded:: 2018.3.0
trustmodel
Explicitly define the used trust model. One of:
- pgp
- classic
- tofu
- tofu+pgp
- direct
- always
- auto
.. versionadded:: fluorine
CLI Example:
.. code-block:: bash
salt '*' gpg.verify text='Hello there. How are you?'
salt '*' gpg.verify filename='/path/to/important.file'
salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
'''
gpg = _create_gpg(user)
trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
if trustmodel and trustmodel not in trustmodels:
msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
log.warn(msg)
return {'res': False, 'message': msg}
extra_args = []
if trustmodel:
extra_args.extend(['--trust-model', trustmodel])
if text:
verified = gpg.verify(text, extra_args=extra_args)
elif filename:
if signature:
# need to call with fopen instead of flopen due to:
# https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
with salt.utils.files.fopen(signature, 'rb') as _fp:
verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
else:
with salt.utils.files.flopen(filename, 'rb') as _fp:
verified = gpg.verify_file(_fp, extra_args=extra_args)
else:
raise SaltInvocationError('filename or text must be passed.')
ret = {}
if verified.trust_level is not None:
ret['res'] = True
ret['username'] = verified.username
ret['key_id'] = verified.key_id
ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
ret['message'] = 'The signature is verified.'
else:
ret['res'] = False
ret['message'] = 'The signature could not be verified.'
return ret
|
Verify a message or file
text
The text to verify.
filename
The filename to verify.
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
gnupghome
Specify the location where GPG keyring and related files are stored.
signature
Specify the filename of a detached signature.
.. versionadded:: 2018.3.0
trustmodel
Explicitly define the used trust model. One of:
- pgp
- classic
- tofu
- tofu+pgp
- direct
- always
- auto
.. versionadded:: fluorine
CLI Example:
.. code-block:: bash
salt '*' gpg.verify text='Hello there. How are you?'
salt '*' gpg.verify filename='/path/to/important.file'
salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L1030-L1117
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
LETTER_TRUST_DICT = {
'e': 'Expired',
'q': 'Unknown',
'n': 'Not Trusted',
'f': 'Fully Trusted',
'm': 'Marginally Trusted',
'u': 'Ultimately Trusted',
'-': 'Unknown',
}
NUM_TRUST_DICT = {
'expired': '1',
'unknown': '2',
'not_trusted': '3',
'marginally': '4',
'fully': '5',
'ultimately': '6',
}
INV_NUM_TRUST_DICT = {
'1': 'Expired',
'2': 'Unknown',
'3': 'Not Trusted',
'4': 'Marginally',
'5': 'Fully Trusted',
'6': 'Ultimately Trusted'
}
VERIFY_TRUST_LEVELS = {
'0': 'Undefined',
'1': 'Never',
'2': 'Marginal',
'3': 'Fully',
'4': 'Ultimate'
}
GPG_1_3_1 = False
try:
import gnupg
HAS_GPG_BINDINGS = True
GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Return the full path to the gpg executable, or None if it is not
    present on the system PATH.
    '''
    gpg_path = salt.utils.path.which('gpg')
    return gpg_path
def __virtual__():
    '''
    Only load this module when both the gpg binary and the python-gnupg
    bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Return account information for ``user`` via the ``user.info`` Salt
    function; defaults to the user the Salt process runs as.
    '''
    if not user:
        # No user given: fall back to the account Salt is running as.
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if userinfo:
        return userinfo
    if user == 'salt':
        # Special case: if the dedicated `salt` account does not exist,
        # recurse once to resolve the account Salt actually runs as.
        return _get_user_info()
    raise SaltInvocationError('User {0} does not exist'.format(user))
def _get_user_gnupghome(user):
    '''
    Compute the default GnuPG home directory for ``user``.
    '''
    # The special ``salt`` user keeps its keyring under the Salt config dir;
    # everyone else uses ~/.gnupg.
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily give the Salt process user ownership of the
    target GnuPG home directory, call ``func``, then hand ownership back
    to the requested user.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        # ``user``/``gnupghome`` are read (not consumed) from the wrapped
        # call's keyword arguments.
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (loader-injected __pub_* style keys) so the
        # wrapped function only receives its declared arguments.
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        if userinfo['uid'] != run_user['uid']:
            # Restore ownership of the keyring to the target user after
            # the wrapped call completes.
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance bound to the given user's keyring.
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the keyword argument to ``homedir``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key records from the user's keyring; pass
    ``secret=True`` for the secret keyring.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text``.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver
    text
        Text to search the keyserver for, e.g. email address, keyID or fingerprint.
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # The search API used here is not available in python-gnupg 1.3.1+.
        # Error-message grammar fixed ("is not support" -> "is not supported").
        raise SaltInvocationError('The search_keys function is not supported with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    for _key in _search_keys(text, keyserver, user):
        # Normalize the raw record: always keyid/uids; the date-like fields
        # are epoch strings and only present on some records.
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_keys
    '''
    keys = []
    for raw in _list_keys(user, gnupghome):
        # Mandatory fields first; optional fields are copied only when set.
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in GPG keychain
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.list_secret_keys
    '''
    keys = []
    for raw in _list_keys(user, gnupghome, secret=True):
        # Mandatory fields first; optional fields are copied only when set.
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        keys.append(entry)
    return keys
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain
    .. note::
        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.
        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.
    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.
    key_length
        The length of the primary key in bits.
    name_real
        The real name of the user identity which is represented by the key.
    name_comment
        A comment to attach to the user id.
    name_email
        An email address for the user.
    subkey_type
        The type of the secondary key to generate.
    subkey_length
        The length of the secondary key in bits.
    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, A number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    # Mandatory generation parameters; optional ones are appended below
    # only when supplied, so gen_key_input can apply its own defaults.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    for param, value in (('name_email', name_email),
                         ('subkey_type', subkey_type),
                         ('subkey_length', subkey_length),
                         ('expire_date', expire_date)):
        if value:
            create_params[param] = value
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        create_params['passphrase'] = gpg_passphrase
    key = gpg.gen_key(gpg.gen_key_input(**create_params))
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain
    keyid
        The keyid of the key to be deleted.
    fingerprint
        The fingerprint of the key to be deleted.
    delete_secret
        Whether to delete a corresponding secret key prior to deleting the public key.
        Secret keys must be deleted before deleting any corresponding public keys.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if not key:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
    fingerprint = key['fingerprint']
    skey = get_secret_key(keyid, fingerprint, user)
    if skey:
        if not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        # Delete the secret key; GnuPG refuses to remove a public key while
        # its secret counterpart still exists, so bail out on failure instead
        # of silently continuing (previous behavior).
        if six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        else:
            ret['res'] = False
            ret['message'] = 'Failed to delete secret key for {0}'.format(fingerprint)
            return ret
    # Delete the public key
    if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
        ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
    ret['res'] = True
    return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on full fingerprint, long keyid, or short (last 8) keyid.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Preserve historical contract: False when nothing matched.
    return found or False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain
    keyid
        The key ID (short or long) of the key to be retrieved.
    fingerprint
        The fingerprint of the key to be retrieved.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on full fingerprint, long keyid, or short (last 8) keyid.
        if not (raw['fingerprint'] == fingerprint or
                raw['keyid'] == keyid or
                raw['keyid'][8:] == keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    # Preserve historical contract: False when nothing matched.
    return found or False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file
    text
        The text containing the key to import.
    filename
        The filename containing the key to import.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                # Read the file in one shot. The previous implementation
                # joined readlines() with '' which raises TypeError on
                # Python 3 (bytes lines, str separator).
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # Normalize the result attributes across python-gnupg APIs, then apply
    # one shared set of status checks.
    if GPG_1_3_1:
        counts = imported_data.counts
        imported = counts.get('imported') or counts.get('imported_rsa')
        unchanged = counts.get('unchanged')
        not_imported = counts.get('not_imported')
        total = counts.get('count')
    else:
        imported = imported_data.imported or imported_data.imported_rsa
        unchanged = imported_data.unchanged
        not_imported = imported_data.not_imported
        total = imported_data.count
    if imported:
        ret['message'] = 'Successfully imported key(s).'
    elif unchanged:
        ret['message'] = 'Key(s) already exist in keychain.'
    elif not_imported or not total:
        ret['res'] = False
        ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export a key from the GPG keychain
    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a comma
        separated string or a list. Anything which GnuPG itself accepts to
        identify a key - for example, the key ID or the fingerprint could be
        used.
    secret
        Export the secret key identified by the ``keyids`` information passed.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept a comma-separated string as well as a list of key IDs.
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from keyserver and add them to keychain
    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu
    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a comma
        separated string or a list.
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept a comma-separated string as well as a list of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    gpg = _create_gpg(user, gnupghome)
    recv_data = gpg.recv_keys(keyserver, *keys)
    for result in recv_data.results:
        if 'ok' in result:
            # '1' means imported, '0' means the key was already present.
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in GPG keychain
    keyid
        The keyid of the key to set the trust level for.
    fingerprint
        The fingerprint of the key to set the trust level for.
    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # Feed "<fingerprint>:<level>\n" to `gpg --import-ownertrust` on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        # gpg reports trust changes on stderr; the digits in the message are
        # the old/new ownertrust values.
        _match = re.findall(r'\d', res['stderr']) if res['stderr'] else []
        if len(_match) == 2:
            ret['fingerprint'] = fingerprint
            ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                INV_NUM_TRUST_DICT[_match[0]],
                INV_NUM_TRUST_DICT[_match[1]]
            )
        elif _match:
            ret['fingerprint'] = fingerprint
            ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            # No digits found (or empty stderr): previously this path raised
            # IndexError when stderr contained no digits.
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    keyid
        The keyid of the key to set the trust level for, defaults to
        first key in the secret keyring.
    text
        The text to sign.
    filename
        The filename to sign.
    output
        The filename where the signed file will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg >= 1.3.1 renamed the key-selection keyword to
    # ``default_key``; older releases use ``keyid``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            # Bug fix: the 1.3.1+ branch previously called gpg.sign(text, ...)
            # with text=None instead of signing the opened file.
            if gnupg_version >= '1.3.1':
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    recipients
        The fingerprints of the recipients for whom the data is being encrypted.
    text
        The text to encrypt.
    filename
        The filename to encrypt.
    output
        The filename where the signed file will be written, default is standard out.
    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to use
        default key or fingerprint to specify a different key to sign with.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) encrypted block as a string without
        the standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.encrypt text='Hello there. How are you?'
        salt '*' gpg.encrypt filename='/path/to/important.file'
        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes the pillar value as a dict, i.e. it
        # assumes pillar holds {'gpg_passphrase': {'gpg_passphrase': ...}};
        # create_key() uses the pillar value directly — confirm which shape
        # is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    # Shape the return value: dict with res/comment by default, raw armored
    # data (or False on failure) when bare=True.
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file
    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.
    text
        The encrypted text to decrypt.
    filename
        The encrypted filename to decrypt.
    output
        The filename where the decrypted data will be written, default is standard out.
    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.
    gnupghome
        Specify the location where GPG keyring and related files are stored.
    bare
        If ``True``, return the (armored) decrypted block as a string without the
        standard comment/res dict.
    CLI Example:
    .. code-block:: bash
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): indexes the pillar value as a dict, mirroring
        # encrypt(); assumes pillar holds
        # {'gpg_passphrase': {'gpg_passphrase': ...}} — confirm.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    # Shape the return value: dict with res/comment by default, raw decrypted
    # data (or False on failure) when bare=True.
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
encrypt
|
python
|
def encrypt(user=None,
recipients=None,
text=None,
filename=None,
output=None,
sign=None,
use_passphrase=False,
gnupghome=None,
bare=False):
'''
Encrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
recipients
The fingerprints for those recipient whom the data is being encrypted for.
text
The text to encrypt.
filename
The filename to encrypt.
output
The filename where the signed file will be written, default is standard out.
sign
Whether to sign, in addition to encrypt, the data. ``True`` to use
default key or fingerprint to specify a different key to sign with.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) encrypted block as a string without
the standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.encrypt text='Hello there. How are you?'
salt '*' gpg.encrypt filename='/path/to/important.file'
salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
'''
ret = {
'res': True,
'comment': ''
}
gpg = _create_gpg(user, gnupghome)
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
raise SaltInvocationError('gpg_passphrase not available in pillar.')
gpg_passphrase = gpg_passphrase['gpg_passphrase']
else:
gpg_passphrase = None
if text:
result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
elif filename:
if GPG_1_3_1:
# This version does not allow us to encrypt using the
# file stream # have to read in the contents and encrypt.
with salt.utils.files.flopen(filename, 'rb') as _fp:
_contents = _fp.read()
result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
else:
# This version allows encrypting the file stream
with salt.utils.files.flopen(filename, 'rb') as _fp:
if output:
result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
else:
result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
else:
raise SaltInvocationError('filename or text must be passed.')
if result.ok:
if not bare:
if output:
ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
else:
ret['comment'] = result.data
else:
ret = result.data
else:
if not bare:
ret['res'] = False
ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
else:
ret = False
log.error(result.stderr)
return ret
|
Encrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
recipients
The fingerprints for those recipient whom the data is being encrypted for.
text
The text to encrypt.
filename
The filename to encrypt.
output
The filename where the signed file will be written, default is standard out.
sign
Whether to sign, in addition to encrypt, the data. ``True`` to use
default key or fingerprint to specify a different key to sign with.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) encrypted block as a string without
the standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.encrypt text='Hello there. How are you?'
salt '*' gpg.encrypt filename='/path/to/important.file'
salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L1120-L1223
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage GPG keychains: add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
# Map GnuPG's single-letter trust/ownertrust codes (as returned in key
# listings) to human-readable labels.
LETTER_TRUST_DICT = {
    'e': 'Expired',
    'q': 'Unknown',
    'n': 'Not Trusted',
    'f': 'Fully Trusted',
    'm': 'Marginally Trusted',
    'u': 'Ultimately Trusted',
    '-': 'Unknown',
}
# Map the trust-level names accepted by trust_key() to the numeric codes
# understood by ``gpg --import-ownertrust``.
NUM_TRUST_DICT = {
    'expired': '1',
    'unknown': '2',
    'not_trusted': '3',
    'marginally': '4',
    'fully': '5',
    'ultimately': '6',
}
# Inverse of NUM_TRUST_DICT; used to render gpg's numeric output back into
# readable labels in trust_key().
INV_NUM_TRUST_DICT = {
    '1': 'Expired',
    '2': 'Unknown',
    '3': 'Not Trusted',
    '4': 'Marginally',
    '5': 'Fully Trusted',
    '6': 'Ultimately Trusted'
}
# Trust levels reported on the result object of python-gnupg's verify().
VERIFY_TRUST_LEVELS = {
    '0': 'Undefined',
    '1': 'Never',
    '2': 'Marginal',
    '3': 'Fully',
    '4': 'Ultimate'
}
# Feature flag: python-gnupg >= 1.3.1 renamed several keyword arguments
# (e.g. ``homedir`` vs ``gnupghome``); the rest of the module branches on it.
GPG_1_3_1 = False
try:
    import gnupg
    HAS_GPG_BINDINGS = True
    GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
    HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Locate the ``gpg`` executable.

    Returns the path found on the search path, or ``None`` when the binary
    is not installed.
    '''
    gpg_path = salt.utils.path.which('gpg')
    return gpg_path
def __virtual__():
    '''
    Makes sure that python-gnupg and gpg are available.

    Returns the virtual module name when both the ``gpg`` binary and the
    ``gnupg`` Python bindings are present; otherwise a ``(False, reason)``
    tuple so the loader can report why the module was skipped.
    '''
    if not _gpg():
        # Without the gpg binary the bindings cannot do anything useful.
        return (False, 'The gpg execution module cannot be loaded: '
               'gpg binary is not in the path.')
    # HAS_GPG_BINDINGS is set by the import guard at module load time.
    return __virtualname__ if HAS_GPG_BINDINGS \
        else (False, 'The gpg execution module cannot be loaded; the '
             'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Wrapper for the ``user.info`` Salt function.

    When *user* is not given, looks up the user the Salt process is running
    as (from the ``user`` config option). Raises SaltInvocationError when
    the requested user does not exist — except for the special ``salt``
    user, which falls back to the running user's info.
    '''
    if not user:
        # No user given: use the user the Salt process is running as.
        user = __salt__['config.option']('user')
    userinfo = __salt__['user.info'](user)
    if not userinfo:
        if user == 'salt':
            # Special case: the `salt` user may not exist as a system
            # account; fall back to the user Salt is running as.
            userinfo = _get_user_info()
        else:
            raise SaltInvocationError('User {0} does not exist'.format(user))
    return userinfo
def _get_user_gnupghome(user):
    '''
    Compute the default GnuPG home directory for *user*.

    The special ``salt`` user keeps its keyring under
    ``<config_dir>/gpgkeys``; any other user gets the conventional
    ``.gnupg`` directory inside their home.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator: temporarily take ownership of the target user's GnuPG home
    directory so the Salt process can operate on it, then restore the
    original ownership afterwards.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (loader-injected, prefixed with '__') so the
        # wrapped function only sees its declared arguments.
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Hand ownership of the GnuPG home back to the target user.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance bound to the right keyring location.

    When *gnupghome* is not supplied it is derived from *user*.  The
    keyword used to pass the home directory differs between python-gnupg
    releases, hence the version switch.
    '''
    home = gnupghome if gnupghome else _get_user_gnupghome(user)
    kwarg = 'homedir' if GPG_1_3_1 else 'gnupghome'
    return gnupg.GPG(**{kwarg: home})
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Fetch the raw key listing (public, or secret when *secret* is true)
    from the given user's keyring.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query *keyserver* (when given) for keys matching *text*, using the
    keyring of *user*.
    '''
    gpg = _create_gpg(user)
    query = (text, keyserver) if keyserver else (text,)
    return gpg.search_keys(*query)
def search_keys(text, keyserver=None, user=None):
    '''
    Search for keys on a keyserver.

    text
        Search term, e.g. an email address, key ID or fingerprint.

    keyserver
        Keyserver to query for GPG keys, defaults to pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com
        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 dropped support for this call path.
        raise SaltInvocationError('The search_keys function is not support with this version of python-gnupg.')
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    results = []
    for raw in _search_keys(text, keyserver, user):
        entry = {'keyid': raw['keyid'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        results.append(entry)
    return results
def list_keys(user=None, gnupghome=None):
    '''
    List the public keys in the GPG keychain.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    results = []
    for raw in _list_keys(user, gnupghome):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
def list_secret_keys(user=None, gnupghome=None):
    '''
    List the secret keys in the GPG keychain.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    results = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {'keyid': raw['keyid'],
                 'fingerprint': raw['fingerprint'],
                 'uids': raw['uids']}
        if raw.get('expires'):
            entry['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        results.append(entry)
    return results
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain.

    .. note::

        GPG key generation requires *a lot* of entropy and randomness.
        Difficult to do over a remote connection, consider having
        another process available which is generating randomness for
        the machine. Also especially difficult on virtual machines,
        consider the `rng-tools
        <http://www.gnu.org/software/hurd/user/tlecarrour/rng-tools.html>`_
        package.

        The create_key process takes awhile so increasing the timeout
        may be necessary, e.g. -t 15.

    key_type
        The type of the primary key to generate. It must be capable of signing.
        'RSA' or 'DSA'.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity which is represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        The expiration date for the primary and any secondary key.
        You can specify an ISO date, a number of days/weeks/months/years,
        an epoch value, or 0 for a non-expiring key.

    use_passphrase
        Whether to protect the generated key with a passphrase. The
        passphrase is read from the ``gpg_passphrase`` pillar key.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    # Parameters handed to gnupg's gen_key_input(); optional fields are
    # only added when supplied so gnupg's own defaults apply otherwise.
    create_params = {'key_type': key_type,
                     'key_length': key_length,
                     'name_real': name_real,
                     'name_comment': name_comment,
                     }
    gpg = _create_gpg(user, gnupghome)
    if name_email:
        create_params['name_email'] = name_email
    if subkey_type:
        create_params['subkey_type'] = subkey_type
    if subkey_length:
        create_params['subkey_length'] = subkey_length
    if expire_date:
        create_params['expire_date'] = expire_date
    if use_passphrase:
        # Passphrase comes from pillar; fail early if it is not set there.
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        else:
            create_params['passphrase'] = gpg_passphrase
    input_data = gpg.gen_key_input(**create_params)
    key = gpg.gen_key(input_data)
    # A fingerprint on the result object signals successful generation.
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain.

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting the
        public key. Secret keys must be deleted before deleting any
        corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E
        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username
        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive selectors; exactly one
    # must be supplied.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # A secret key blocks deletion of the public key unless the caller
        # explicitly asked for it to be removed first.
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key.
        # NOTE(review): if deleting the secret key above fails, control still
        # falls through and attempts the public-key deletion — confirm this
        # best-effort behavior is intended.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a public key from the GPG keychain.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E
        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on fingerprint, the full keyid, or the short (last 8 chars)
        # keyid; skip everything else.
        if (raw['fingerprint'] != fingerprint and
                raw['keyid'] != keyid and
                raw['keyid'][8:] != keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    return found if found else False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E
        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192
        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, the full keyid, or the short (last 8 chars)
        # keyid; skip everything else.
        if (raw['fingerprint'] != fingerprint and
                raw['keyid'] != keyid and
                raw['keyid'][8:] != keyid):
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime('%Y-%m-%d',
                                             time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    return found if found else False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file.

    text
        The text containing the key to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                lines = _fp.readlines()
                # NOTE(review): the file is opened in binary mode, so
                # ``lines`` are bytes; ``''.join(lines)`` would raise a
                # TypeError under Python 3 — confirm flopen's decoding
                # behavior here.
                text = ''.join(lines)
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    # The shape of the import result differs between python-gnupg releases:
    # >= 1.3.1 exposes a ``counts`` dict, older versions expose attributes.
    if GPG_1_3_1:
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export one or more keys from the GPG keychain in armored form.

    keyids
        The key ID(s) of the key(s) to be exported. Can be specified as a
        comma separated string or a list. Anything which GnuPG itself
        accepts to identify a key — for example, the key ID or the
        fingerprint — could be used.

    secret
        Export the secret key identified by the ``keyids`` information
        passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E
        salt '*' gpg.export_key keyids=3FAD9F1E secret=True
        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept either a list or a comma-separated string of key identifiers.
    key_list = keyids.split(',') if isinstance(keyids, six.string_types) else keyids
    return _create_gpg(user, gnupghome).export_keys(key_list, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from a keyserver and add them to the keychain.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to pgp.mit.edu.

    keys
        The keyID(s) to retrieve from the keyserver. Can be specified as a
        comma separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'
        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"
        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    # Accept either a list or a comma-separated string of key IDs.
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Each entry in ``results`` describes one requested key: 'ok' == '1'
    # means newly imported, 'ok' == '0' means already present, and a
    # 'problem' entry means the fetch/import failed.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in the GPG keychain.

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # ``gpg --import-ownertrust`` reads "fingerprint:level\n" records on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        # gpg reports trust changes on stderr; pull the numeric trust codes
        # out of its message to build a readable summary.
        if res['stderr']:
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                # "changed from X to Y" style message.
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            elif _match:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
            else:
                # BUG FIX: previously this branch indexed _match[0]
                # unconditionally and raised IndexError when gpg's stderr
                # contained no digits; surface the raw message instead.
                ret['fingerprint'] = fingerprint
                ret['message'] = res['stderr']
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign a message or file.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to the first key in the
        secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there. How are you?'
        salt '*' gpg.sign filename='/path/to/important.file'
        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg renamed the signing-key keyword from ``keyid`` to
    # ``default_key`` in 1.3.1, hence the version switch below.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # BUG FIX: previously this branch called
                # gpg.sign(text, ...) — but ``text`` is always None here,
                # so the file contents were never signed. Sign the open
                # file handle instead.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            # NOTE(review): signed_data.data may be bytes under Python 3;
            # writing it through a text-mode handle would raise — confirm
            # against the supported python-gnupg versions.
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file.

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
            - pgp
            - classic
            - tofu
            - tofu+pgp
            - direct
            - always
            - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there. How are you?'
        salt '*' gpg.verify filename='/path/to/important.file'
        salt '*' gpg.verify filename='/path/to/important.file' use_passphrase=True
        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    # BUG FIX: gnupghome was accepted but previously never forwarded, so a
    # caller-specified keyring location was silently ignored.
    gpg = _create_gpg(user, gnupghome)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is a deprecated alias of log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    # A trust_level of None on the result object means verification failed.
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def decrypt(user=None,
            text=None,
            filename=None,
            output=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Decrypt a message or file.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to
        ``/etc/salt/gpgkeys``.

    text
        The encrypted text to decrypt.

    filename
        The encrypted filename to decrypt.

    output
        The filename where the decrypted data will be written, default is
        standard out.

    use_passphrase
        Whether to use a passphrase with the decryption key. Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) decrypted block as a string
        without the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
        salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this expects the pillar value to be a mapping with a
        # 'gpg_passphrase' key, whereas sign() uses the pillar value
        # directly — confirm which pillar layout is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        result = gpg.decrypt(text, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if output:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
            else:
                result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            # bare=True: return only the decrypted payload.
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/modules/gpg.py
|
decrypt
|
python
|
def decrypt(user=None,
text=None,
filename=None,
output=None,
use_passphrase=False,
gnupghome=None,
bare=False):
'''
Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
'''
ret = {
'res': True,
'comment': ''
}
gpg = _create_gpg(user, gnupghome)
if use_passphrase:
gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
if not gpg_passphrase:
raise SaltInvocationError('gpg_passphrase not available in pillar.')
gpg_passphrase = gpg_passphrase['gpg_passphrase']
else:
gpg_passphrase = None
if text:
result = gpg.decrypt(text, passphrase=gpg_passphrase)
elif filename:
with salt.utils.files.flopen(filename, 'rb') as _fp:
if output:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase, output=output)
else:
result = gpg.decrypt_file(_fp, passphrase=gpg_passphrase)
else:
raise SaltInvocationError('filename or text must be passed.')
if result.ok:
if not bare:
if output:
ret['comment'] = 'Decrypted data has been written to {0}'.format(output)
else:
ret['comment'] = result.data
else:
ret = result.data
else:
if not bare:
ret['res'] = False
ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
else:
ret = False
log.error(result.stderr)
return ret
|
Decrypt a message or file
user
Which user's keychain to access, defaults to user Salt is running as.
Passing the user as ``salt`` will set the GnuPG home directory to the
``/etc/salt/gpgkeys``.
text
The encrypted text to decrypt.
filename
The encrypted filename to decrypt.
output
The filename where the decrypted data will be written, default is standard out.
use_passphrase
Whether to use a passphrase with the signing key. Passphrase is received
from Pillar.
gnupghome
Specify the location where GPG keyring and related files are stored.
bare
If ``True``, return the (armored) decrypted block as a string without the
standard comment/res dict.
CLI Example:
.. code-block:: bash
salt '*' gpg.decrypt filename='/path/to/important.file.gpg'
salt '*' gpg.decrypt filename='/path/to/important.file.gpg' use_passphrase=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gpg.py#L1226-L1311
|
[
"def _create_gpg(user=None, gnupghome=None):\n '''\n Create the GPG object\n '''\n if not gnupghome:\n gnupghome = _get_user_gnupghome(user)\n\n if GPG_1_3_1:\n gpg = gnupg.GPG(homedir=gnupghome)\n else:\n gpg = gnupg.GPG(gnupghome=gnupghome)\n\n return gpg\n"
] |
# -*- coding: utf-8 -*-
'''
Manage a GPG keychains, add keys, create keys, retrieve keys from keyservers.
Sign, encrypt and sign plus encrypt text and files.
.. versionadded:: 2015.5.0
.. note::
The ``python-gnupg`` library and ``gpg`` binary are required to be
installed.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import functools
import logging
import os
import re
import time
# Import salt libs
import salt.utils.files
import salt.utils.path
from salt.exceptions import SaltInvocationError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd-party libs
from salt.ext import six
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'gpg'
LETTER_TRUST_DICT = {
'e': 'Expired',
'q': 'Unknown',
'n': 'Not Trusted',
'f': 'Fully Trusted',
'm': 'Marginally Trusted',
'u': 'Ultimately Trusted',
'-': 'Unknown',
}
NUM_TRUST_DICT = {
'expired': '1',
'unknown': '2',
'not_trusted': '3',
'marginally': '4',
'fully': '5',
'ultimately': '6',
}
INV_NUM_TRUST_DICT = {
'1': 'Expired',
'2': 'Unknown',
'3': 'Not Trusted',
'4': 'Marginally',
'5': 'Fully Trusted',
'6': 'Ultimately Trusted'
}
VERIFY_TRUST_LEVELS = {
'0': 'Undefined',
'1': 'Never',
'2': 'Marginal',
'3': 'Fully',
'4': 'Ultimate'
}
GPG_1_3_1 = False
try:
import gnupg
HAS_GPG_BINDINGS = True
GPG_1_3_1 = _LooseVersion(gnupg.__version__) >= _LooseVersion('1.3.1')
except ImportError:
HAS_GPG_BINDINGS = False
def _gpg():
    '''
    Locate the ``gpg`` executable on the minion's PATH.

    Returns the absolute path, or ``None`` when the binary is absent.
    '''
    binary_path = salt.utils.path.which('gpg')
    return binary_path
def __virtual__():
    '''
    Only load this module when both the ``gpg`` binary and the
    ``gnupg`` Python bindings are available.
    '''
    if not _gpg():
        return (False, 'The gpg execution module cannot be loaded: '
                'gpg binary is not in the path.')
    if HAS_GPG_BINDINGS:
        return __virtualname__
    return (False, 'The gpg execution module cannot be loaded; the '
            'gnupg python module is not installed.')
def _get_user_info(user=None):
    '''
    Resolve account information via the ``user.info`` execution function.

    Defaults to the account the Salt process runs as.  The special
    ``salt`` account may not exist on the system; in that case fall back
    to the running user's info instead of raising.
    '''
    if not user:
        # Default to the user Salt is running as
        user = __salt__['config.option']('user')
    info = __salt__['user.info'](user)
    if info:
        return info
    if user != 'salt':
        raise SaltInvocationError('User {0} does not exist'.format(user))
    # ``salt`` user does not exist: recurse with no user, which resolves
    # to the account Salt runs as.
    return _get_user_info()
def _get_user_gnupghome(user):
    '''
    Return the default GnuPG home directory for ``user``.

    The special ``salt`` user maps to ``<config_dir>/gpgkeys``; any
    other user gets ``~/.gnupg``.
    '''
    if user == 'salt':
        return os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
    return os.path.join(_get_user_info(user)['home'], '.gnupg')
def _restore_ownership(func):
    '''
    Decorator for GPG operations that must run against another user's
    keyring.

    Before calling ``func`` it chowns the GnuPG home directory to the
    user Salt runs as (so python-gnupg can read/write it), and afterwards
    chowns everything back to the target user.
    '''
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        '''
        Wrap gpg function calls to fix permissions
        '''
        user = kwargs.get('user')
        gnupghome = kwargs.get('gnupghome')
        if not gnupghome:
            gnupghome = _get_user_gnupghome(user)
        userinfo = _get_user_info(user)
        run_user = _get_user_info()
        if userinfo['uid'] != run_user['uid'] and os.path.exists(gnupghome):
            # Given user is different from one who runs Salt process,
            # need to fix ownership permissions for GnuPG home dir
            group = __salt__['file.gid_to_group'](run_user['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, run_user['name'], group)
        # Filter special kwargs (Salt injects __pub_* style keys that the
        # wrapped function's signature does not accept)
        for key in list(kwargs):
            if key.startswith('__'):
                del kwargs[key]
        ret = func(*args, **kwargs)
        # Restore ownership to the target user after the operation, even
        # if the home dir was only just created by ``func``.
        if userinfo['uid'] != run_user['uid']:
            group = __salt__['file.gid_to_group'](userinfo['gid'])
            for path in [gnupghome] + __salt__['file.find'](gnupghome):
                __salt__['file.chown'](path, user, group)
        return ret
    return func_wrapper
def _create_gpg(user=None, gnupghome=None):
    '''
    Build a ``gnupg.GPG`` instance bound to the given home directory
    (defaulting to the user's GnuPG home).
    '''
    home = gnupghome or _get_user_gnupghome(user)
    # python-gnupg >= 1.3.1 renamed the keyword argument to ``homedir``.
    if GPG_1_3_1:
        return gnupg.GPG(homedir=home)
    return gnupg.GPG(gnupghome=home)
def _list_keys(user=None, gnupghome=None, secret=False):
    '''
    Return the raw key listing from python-gnupg for the given keyring.
    '''
    return _create_gpg(user, gnupghome).list_keys(secret)
def _search_keys(text, keyserver, user=None):
    '''
    Query a keyserver for keys matching ``text``; when no keyserver is
    given, python-gnupg's default is used.
    '''
    gpg = _create_gpg(user)
    if keyserver:
        return gpg.search_keys(text, keyserver)
    return gpg.search_keys(text)
def search_keys(text, keyserver=None, user=None):
    '''
    Search keys from keyserver

    text
        Text to search the keyserver for, e.g. email address, keyID or
        fingerprint.

    keyserver
        Keyserver to use for searching for GPG keys, defaults to
        pgp.mit.edu.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.search_keys user@example.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com

        salt '*' gpg.search_keys user@example.com keyserver=keyserver.ubuntu.com user=username
    '''
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 dropped the search API relied on here.
        # Fixed grammar of the user-facing error ("not support" -> "not
        # supported").
        raise SaltInvocationError(
            'The search_keys function is not supported with this version of python-gnupg.'
        )
    if not keyserver:
        keyserver = 'pgp.mit.edu'
    _keys = []
    for _key in _search_keys(text, keyserver, user):
        tmp = {'keyid': _key['keyid'],
               'uids': _key['uids']}
        # Timestamps arrive as epoch strings; present them as dates.
        if _key.get('expires'):
            tmp['expires'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['expires'])))
        if _key.get('date'):
            tmp['created'] = time.strftime('%Y-%m-%d',
                                           time.localtime(float(_key['date'])))
        if _key.get('length'):
            tmp['keyLength'] = _key['length']
        _keys.append(tmp)
    return _keys
def list_keys(user=None, gnupghome=None):
    '''
    List public keys in the GPG keychain.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_keys
    '''
    result = []
    for raw in _list_keys(user, gnupghome):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as dates; trust letters are
        # translated to human-readable names.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        result.append(entry)
    return result
def list_secret_keys(user=None, gnupghome=None):
    '''
    List secret keys in the GPG keychain.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.list_secret_keys
    '''
    result = []
    for raw in _list_keys(user, gnupghome, secret=True):
        entry = {
            'keyid': raw['keyid'],
            'fingerprint': raw['fingerprint'],
            'uids': raw['uids'],
        }
        # Epoch timestamps are rendered as dates; trust letters are
        # translated to human-readable names.
        if raw.get('expires'):
            entry['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            entry['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            entry['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            entry['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            entry['trust'] = LETTER_TRUST_DICT[raw['trust']]
        result.append(entry)
    return result
@_restore_ownership
def create_key(key_type='RSA',
               key_length=1024,
               name_real='Autogenerated Key',
               name_comment='Generated by SaltStack',
               name_email=None,
               subkey_type=None,
               subkey_length=None,
               expire_date=None,
               use_passphrase=False,
               user=None,
               gnupghome=None):
    '''
    Create a key in the GPG keychain.

    .. note::
        GPG key generation requires *a lot* of entropy and randomness;
        consider running ``rng-tools`` on the minion, especially on
        virtual machines.  Key creation is slow, so increasing the
        command timeout (e.g. ``-t 15``) may be necessary.

    key_type
        The type of the primary key to generate ('RSA' or 'DSA'); it must
        be capable of signing.

    key_length
        The length of the primary key in bits.

    name_real
        The real name of the user identity represented by the key.

    name_comment
        A comment to attach to the user id.

    name_email
        An email address for the user.

    subkey_type
        The type of the secondary key to generate.

    subkey_length
        The length of the secondary key in bits.

    expire_date
        Expiration for the primary and any secondary key: an ISO date, a
        number of days/weeks/months/years, an epoch value, or 0 for a
        non-expiring key.

    use_passphrase
        Whether to protect the key with a passphrase taken from the
        ``gpg_passphrase`` pillar value.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt -t 15 '*' gpg.create_key
    '''
    ret = {
        'res': True,
        'fingerprint': '',
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    params = {
        'key_type': key_type,
        'key_length': key_length,
        'name_real': name_real,
        'name_comment': name_comment,
    }
    # Optional generation parameters are only forwarded when set.
    for option, value in (('name_email', name_email),
                          ('subkey_type', subkey_type),
                          ('subkey_length', subkey_length),
                          ('expire_date', expire_date)):
        if value:
            params[option] = value
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            ret['res'] = False
            ret['message'] = "gpg_passphrase not available in pillar."
            return ret
        params['passphrase'] = gpg_passphrase
    key = gpg.gen_key(gpg.gen_key_input(**params))
    if key.fingerprint:
        ret['fingerprint'] = key.fingerprint
        ret['message'] = 'GPG key pair successfully generated.'
    else:
        ret['res'] = False
        ret['message'] = 'Unable to generate GPG key pair.'
    return ret
def delete_key(keyid=None,
               fingerprint=None,
               delete_secret=False,
               user=None,
               gnupghome=None):
    '''
    Delete a key from the GPG keychain.

    keyid
        The keyid of the key to be deleted.

    fingerprint
        The fingerprint of the key to be deleted.

    delete_secret
        Whether to delete a corresponding secret key prior to deleting
        the public key.  Secret keys must be deleted before deleting any
        corresponding public keys.

    user
        Which user's keychain to access, defaults to user Salt is running
        as.  Passing the user as ``salt`` will set the GnuPG home
        directory to ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.delete_key keyid=3FAD9F1E

        salt '*' gpg.delete_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username

        salt '*' gpg.delete_key keyid=3FAD9F1E user=username delete_secret=True
    '''
    ret = {
        'res': True,
        'message': ''
    }
    # keyid and fingerprint are mutually exclusive, but one is required.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint and not keyid:
        ret['res'] = False
        ret['message'] = 'Required argument, fingerprint or keyid'
        return ret
    gpg = _create_gpg(user, gnupghome)
    key = get_key(keyid, fingerprint, user)
    if key:
        fingerprint = key['fingerprint']
        skey = get_secret_key(keyid, fingerprint, user)
        # GnuPG refuses to delete a public key while its secret half
        # exists, so bail out unless the caller opted in.
        if skey and not delete_secret:
            ret['res'] = False
            ret['message'] = 'Secret key exists, delete first or pass delete_secret=True.'
            return ret
        elif skey and delete_secret and six.text_type(gpg.delete_keys(fingerprint, True)) == 'ok':
            # Delete the secret key
            ret['message'] = 'Secret key for {0} deleted\n'.format(fingerprint)
        # Delete the public key
        # NOTE(review): if either delete_keys() call does not return 'ok'
        # the failure is silently ignored and 'res' stays True — presumably
        # intentional best-effort behavior; confirm before tightening.
        if six.text_type(gpg.delete_keys(fingerprint)) == 'ok':
            ret['message'] += 'Public key for {0} deleted'.format(fingerprint)
        ret['res'] = True
        return ret
    else:
        ret['res'] = False
        ret['message'] = 'Key not available in keychain.'
        return ret
def get_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a public key from the GPG keychain.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_key keyid=3FAD9F1E

        salt '*' gpg.get_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome):
        # Match on fingerprint, full keyid, or the short (last 8 chars)
        # keyid form.
        matched = (raw['fingerprint'] == fingerprint
                   or raw['keyid'] == keyid
                   or raw['keyid'][8:] == keyid)
        if not matched:
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    if found:
        return found
    return False
def get_secret_key(keyid=None, fingerprint=None, user=None, gnupghome=None):
    '''
    Get a secret key from the GPG keychain.

    keyid
        The key ID (short or long) of the key to be retrieved.

    fingerprint
        The fingerprint of the key to be retrieved.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.get_secret_key keyid=3FAD9F1E

        salt '*' gpg.get_secret_key fingerprint=53C96788253E58416D20BCD352952C84C3252192

        salt '*' gpg.get_secret_key keyid=3FAD9F1E user=username
    '''
    found = {}
    for raw in _list_keys(user, gnupghome, secret=True):
        # Match on fingerprint, full keyid, or the short (last 8 chars)
        # keyid form.
        matched = (raw['fingerprint'] == fingerprint
                   or raw['keyid'] == keyid
                   or raw['keyid'][8:] == keyid)
        if not matched:
            continue
        found['keyid'] = raw['keyid']
        found['fingerprint'] = raw['fingerprint']
        found['uids'] = raw['uids']
        if raw.get('expires'):
            found['expires'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['expires'])))
        if raw.get('date'):
            found['created'] = time.strftime(
                '%Y-%m-%d', time.localtime(float(raw['date'])))
        if raw.get('length'):
            found['keyLength'] = raw['length']
        if raw.get('ownertrust'):
            found['ownerTrust'] = LETTER_TRUST_DICT[raw['ownertrust']]
        if raw.get('trust'):
            found['trust'] = LETTER_TRUST_DICT[raw['trust']]
    if found:
        return found
    return False
@_restore_ownership
def import_key(text=None,
               filename=None,
               user=None,
               gnupghome=None):
    r'''
    Import a key from text or file

    text
        The text containing to import.

    filename
        The filename containing the key to import.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.import_key text='-----BEGIN PGP PUBLIC KEY BLOCK-----\n ... -----END PGP PUBLIC KEY BLOCK-----'
        salt '*' gpg.import_key filename='/path/to/public-key-file'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if not text and not filename:
        raise SaltInvocationError('filename or text must be passed.')
    if filename:
        try:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                # Bug fix: the file is opened in binary mode, so joining
                # readlines() with a str separator raised TypeError on
                # Python 3.  Read the whole content instead; import_keys
                # accepts bytes as well as text.
                text = _fp.read()
        except IOError:
            raise SaltInvocationError('filename does not exist.')
    imported_data = gpg.import_keys(text)
    if GPG_1_3_1:
        # python-gnupg >= 1.3.1 exposes result tallies via ``counts``.
        counts = imported_data.counts
        if counts.get('imported') or counts.get('imported_rsa'):
            ret['message'] = 'Successfully imported key(s).'
        elif counts.get('unchanged'):
            ret['message'] = 'Key(s) already exist in keychain.'
        elif counts.get('not_imported'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not counts.get('count'):
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    else:
        if imported_data.imported or imported_data.imported_rsa:
            ret['message'] = 'Successfully imported key(s).'
        elif imported_data.unchanged:
            ret['message'] = 'Key(s) already exist in keychain.'
        elif imported_data.not_imported:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
        elif not imported_data.count:
            ret['res'] = False
            ret['message'] = 'Unable to import key.'
    return ret
def export_key(keyids=None, secret=False, user=None, gnupghome=None):
    '''
    Export key(s) from the GPG keychain.

    keyids
        The key ID(s) of the key(s) to be exported, as a comma separated
        string or a list.  Anything GnuPG itself accepts to identify a
        key (key ID, fingerprint) works.

    secret
        Export the secret key identified by the ``keyids`` information passed.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.export_key keyids=3FAD9F1E

        salt '*' gpg.export_key keyids=3FAD9F1E secret=True

        salt '*' gpg.export_key keyids="['3FAD9F1E','3FBD8F1E']" user=username
    '''
    # Accept a comma-separated string as well as a list.
    if isinstance(keyids, six.string_types):
        keyids = keyids.split(',')
    gpg = _create_gpg(user, gnupghome)
    return gpg.export_keys(keyids, secret)
@_restore_ownership
def receive_keys(keyserver=None, keys=None, user=None, gnupghome=None):
    '''
    Receive key(s) from a keyserver and add them to the keychain.

    keyserver
        Keyserver to fetch GPG keys from, defaults to pgp.mit.edu.

    keys
        The keyID(s) to retrieve, as a comma separated string or a list.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing ``salt`` uses ``/etc/salt/gpgkeys`` as the GnuPG home.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.receive_keys keys='3FAD9F1E'

        salt '*' gpg.receive_keys keys="['3FAD9F1E','3FBD9F2E']"

        salt '*' gpg.receive_keys keys=3FAD9F1E user=username
    '''
    ret = {
        'res': True,
        'changes': {},
        'message': []
    }
    gpg = _create_gpg(user, gnupghome)
    keyserver = keyserver or 'pgp.mit.edu'
    if isinstance(keys, six.string_types):
        keys = keys.split(',')
    recv_data = gpg.recv_keys(keyserver, *keys)
    # Translate python-gnupg's per-key result records into messages;
    # 'ok' == '1' means newly imported, '0' means already present.
    for result in recv_data.results:
        if 'ok' in result:
            if result['ok'] == '1':
                ret['message'].append('Key {0} added to keychain'.format(result['fingerprint']))
            elif result['ok'] == '0':
                ret['message'].append('Key {0} already exists in keychain'.format(result['fingerprint']))
        elif 'problem' in result:
            ret['message'].append('Unable to add key to keychain')
    return ret
def trust_key(keyid=None,
              fingerprint=None,
              trust_level=None,
              user=None):
    '''
    Set the trust level for a key in the GPG keychain.

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running
        as.  Passing the user as ``salt`` will set the GnuPG home
        directory to ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'

        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'

        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }
    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']
    # keyid and fingerprint are mutually exclusive; resolve a keyid to
    # its fingerprint since gpg's ownertrust import is fingerprint-keyed.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret
    if not fingerprint:
        if keyid:
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret
    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))
    # ``gpg --import-ownertrust`` reads "FINGERPRINT:LEVEL" lines on stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user
    if user == 'salt':
        # The ``salt`` pseudo-user keeps its keyring under the config
        # dir and the command must run as root against that homedir.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)
    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # NOTE(review): gpg reports the trust change on stderr; two
            # digits presumably mean "changed from X to Y", one digit a
            # fresh assignment.  This parse is locale/format sensitive —
            # confirm against the gpg version in use.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]],
                    INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
def sign(user=None,
         keyid=None,
         text=None,
         filename=None,
         output=None,
         use_passphrase=False,
         gnupghome=None):
    '''
    Sign message or file

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    keyid
        The keyid of the key to sign with, defaults to the first key in
        the secret keyring.

    text
        The text to sign.

    filename
        The filename to sign.

    output
        The filename where the signed file will be written, default is standard out.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is received
        from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.sign text='Hello there.  How are you?'

        salt '*' gpg.sign filename='/path/to/important.file'

        salt '*' gpg.sign filename='/path/to/important.file' use_passphrase=True
    '''
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
    else:
        gpg_passphrase = None
    # python-gnupg >= 1.3.1 renamed the signing-key keyword from
    # ``keyid`` to ``default_key``.
    gnupg_version = _LooseVersion(gnupg.__version__)
    if text:
        if gnupg_version >= '1.3.1':
            signed_data = gpg.sign(text, default_key=keyid, passphrase=gpg_passphrase)
        else:
            signed_data = gpg.sign(text, keyid=keyid, passphrase=gpg_passphrase)
    elif filename:
        with salt.utils.files.flopen(filename, 'rb') as _fp:
            if gnupg_version >= '1.3.1':
                # Bug fix: this branch previously called
                # ``gpg.sign(text, ...)`` with ``text`` unset instead of
                # signing the opened file stream.
                signed_data = gpg.sign_file(_fp, default_key=keyid, passphrase=gpg_passphrase)
            else:
                signed_data = gpg.sign_file(_fp, keyid=keyid, passphrase=gpg_passphrase)
        if output:
            with salt.utils.files.flopen(output, 'w') as fout:
                fout.write(signed_data.data)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    return signed_data.data
def verify(text=None,
           user=None,
           filename=None,
           gnupghome=None,
           signature=None,
           trustmodel=None):
    '''
    Verify a message or file

    text
        The text to verify.

    filename
        The filename to verify.

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    signature
        Specify the filename of a detached signature.

        .. versionadded:: 2018.3.0

    trustmodel
        Explicitly define the used trust model. One of:
          - pgp
          - classic
          - tofu
          - tofu+pgp
          - direct
          - always
          - auto

        .. versionadded:: fluorine

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.verify text='Hello there.  How are you?'

        salt '*' gpg.verify filename='/path/to/important.file'

        salt '*' gpg.verify filename='/path/to/important.file' trustmodel=direct
    '''
    gpg = _create_gpg(user)
    trustmodels = ('pgp', 'classic', 'tofu', 'tofu+pgp', 'direct', 'always', 'auto')
    if trustmodel and trustmodel not in trustmodels:
        msg = 'Invalid trustmodel defined: {}. Use one of: {}'.format(trustmodel, ', '.join(trustmodels))
        # log.warn is deprecated; use log.warning
        log.warning(msg)
        return {'res': False, 'message': msg}
    extra_args = []
    if trustmodel:
        extra_args.extend(['--trust-model', trustmodel])
    if text:
        verified = gpg.verify(text, extra_args=extra_args)
    elif filename:
        if signature:
            # need to call with fopen instead of flopen due to:
            # https://bitbucket.org/vinay.sajip/python-gnupg/issues/76/verify_file-closes-passed-file-handle
            with salt.utils.files.fopen(signature, 'rb') as _fp:
                verified = gpg.verify_file(_fp, filename, extra_args=extra_args)
        else:
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                verified = gpg.verify_file(_fp, extra_args=extra_args)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    ret = {}
    if verified.trust_level is not None:
        ret['res'] = True
        ret['username'] = verified.username
        ret['key_id'] = verified.key_id
        ret['trust_level'] = VERIFY_TRUST_LEVELS[six.text_type(verified.trust_level)]
        ret['message'] = 'The signature is verified.'
    else:
        ret['res'] = False
        ret['message'] = 'The signature could not be verified.'
    return ret
def encrypt(user=None,
            recipients=None,
            text=None,
            filename=None,
            output=None,
            sign=None,
            use_passphrase=False,
            gnupghome=None,
            bare=False):
    '''
    Encrypt a message or file.

    user
        Which user's keychain to access, defaults to user Salt is running
        as.  Passing the user as ``salt`` will set the GnuPG home
        directory to ``/etc/salt/gpgkeys``.

    recipients
        The fingerprints for those recipient whom the data is being
        encrypted for.

    text
        The text to encrypt.

    filename
        The filename to encrypt.

    output
        The filename where the encrypted file will be written, default is
        standard out.

    sign
        Whether to sign, in addition to encrypt, the data. ``True`` to
        use default key or fingerprint to specify a different key to sign
        with.

    use_passphrase
        Whether to use a passphrase with the signing key. Passphrase is
        received from Pillar.

    gnupghome
        Specify the location where GPG keyring and related files are stored.

    bare
        If ``True``, return the (armored) encrypted block as a string
        without the standard comment/res dict.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.encrypt text='Hello there.  How are you?'

        salt '*' gpg.encrypt filename='/path/to/important.file'

        salt '*' gpg.encrypt filename='/path/to/important.file' use_passphrase=True
    '''
    ret = {
        'res': True,
        'comment': ''
    }
    gpg = _create_gpg(user, gnupghome)
    if use_passphrase:
        gpg_passphrase = __salt__['pillar.get']('gpg_passphrase')
        if not gpg_passphrase:
            raise SaltInvocationError('gpg_passphrase not available in pillar.')
        # NOTE(review): this indexes into the pillar value, so the pillar
        # apparently holds {'gpg_passphrase': ...}; the sibling create_key
        # uses the value directly — confirm which shape is intended.
        gpg_passphrase = gpg_passphrase['gpg_passphrase']
    else:
        gpg_passphrase = None
    if text:
        # NOTE(review): the text branch ignores ``output`` and ``sign``.
        result = gpg.encrypt(text, recipients, passphrase=gpg_passphrase)
    elif filename:
        if GPG_1_3_1:
            # This version does not allow us to encrypt using the
            # file stream # have to read in the contents and encrypt.
            # NOTE(review): ``sign`` is not forwarded in this branch.
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                _contents = _fp.read()
            result = gpg.encrypt(_contents, recipients, passphrase=gpg_passphrase, output=output)
        else:
            # This version allows encrypting the file stream
            with salt.utils.files.flopen(filename, 'rb') as _fp:
                if output:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, output=output, sign=sign)
                else:
                    result = gpg.encrypt_file(_fp, recipients, passphrase=gpg_passphrase, sign=sign)
    else:
        raise SaltInvocationError('filename or text must be passed.')
    if result.ok:
        if not bare:
            if output:
                ret['comment'] = 'Encrypted data has been written to {0}'.format(output)
            else:
                ret['comment'] = result.data
        else:
            ret = result.data
    else:
        if not bare:
            ret['res'] = False
            ret['comment'] = '{0}.\nPlease check the salt-minion log.'.format(result.status)
        else:
            ret = False
        log.error(result.stderr)
    return ret
|
saltstack/salt
|
salt/utils/msgpack.py
|
pack
|
python
|
def pack(o, stream, **kwargs):
    '''
    .. versionadded:: 2018.3.4

    Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
    a proxy.

    By default, this function uses the msgpack module and falls back to
    msgpack_pure, if the msgpack is not available. You can pass an alternate
    msgpack module using the _msgpack_module argument.
    '''
    module = kwargs.pop('_msgpack_module', msgpack)
    user_default = kwargs.pop('default', lambda x: x)

    def _default(obj):
        # Unwrap thread-local proxies before the caller's default handler.
        return user_default(ThreadLocalProxy.unproxy(obj))

    return module.pack(o, stream, default=_default, **kwargs)
|
.. versionadded:: 2018.3.4
Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/msgpack.py#L20-L38
| null |
# -*- coding: utf-8 -*-
'''
Functions to work with MessagePack
'''
from __future__ import absolute_import
# Import Python libs
try:
# Attempt to import msgpack
import msgpack
except ImportError:
# Fall back to msgpack_pure
import msgpack_pure as msgpack # pylint: disable=import-error
# Import Salt libs
from salt.utils.thread_local_proxy import ThreadLocalProxy
def packb(o, **kwargs):
    '''
    .. versionadded:: 2018.3.4

    Wraps msgpack.packb and ensures that the passed object is unwrapped if it
    is a proxy.

    By default, this function uses the msgpack module and falls back to
    msgpack_pure, if the msgpack is not available. You can pass an alternate
    msgpack module using the _msgpack_module argument.
    '''
    module = kwargs.pop('_msgpack_module', msgpack)
    user_default = kwargs.pop('default', lambda x: x)

    def _default(obj):
        # Unwrap thread-local proxies before the caller's default handler.
        return user_default(ThreadLocalProxy.unproxy(obj))

    return module.packb(o, default=_default, **kwargs)
def unpack(stream, **kwargs):
    '''
    .. versionadded:: 2018.3.4

    Wraps msgpack.unpack.

    By default, this function uses the msgpack module and falls back to
    msgpack_pure, if the msgpack is not available. You can pass an alternate
    msgpack module using the _msgpack_module argument.
    '''
    module = kwargs.pop('_msgpack_module', msgpack)
    return module.unpack(stream, **kwargs)
def unpackb(packed, **kwargs):
    '''
    .. versionadded:: 2018.3.4

    Thin wrapper around ``msgpack.unpackb``.

    The ``msgpack`` module (or its ``msgpack_pure`` fallback) is used unless
    an alternate implementation is supplied via the ``_msgpack_module``
    keyword argument.
    '''
    impl = kwargs.pop('_msgpack_module', msgpack)
    result = impl.unpackb(packed, **kwargs)
    return result
# Aliases matching the load/loads/dump/dumps naming convention used by
# simplejson, marshal and pickle, so this module can be used as a drop-in
# replacement for them.
load = unpack
loads = unpackb
dump = pack
dumps = packb
|
saltstack/salt
|
salt/utils/msgpack.py
|
unpack
|
python
|
def unpack(stream, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
return msgpack_module.unpack(stream, **kwargs)
|
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/msgpack.py#L62-L73
| null |
# -*- coding: utf-8 -*-
'''
Functions to work with MessagePack
'''
from __future__ import absolute_import
# Import Python libs
try:
# Attempt to import msgpack
import msgpack
except ImportError:
# Fall back to msgpack_pure
import msgpack_pure as msgpack # pylint: disable=import-error
# Import Salt libs
from salt.utils.thread_local_proxy import ThreadLocalProxy
def pack(o, stream, **kwargs):
    '''
    .. versionadded:: 2018.3.4

    Wrap ``msgpack.pack`` so that any :class:`ThreadLocalProxy` handed to
    the serializer is replaced by the object it wraps before encoding to
    ``stream``.

    By default the ``msgpack`` module (or its ``msgpack_pure`` fallback) is
    used; an alternate implementation can be supplied through the
    ``_msgpack_module`` keyword argument.
    '''
    impl = kwargs.pop('_msgpack_module', msgpack)
    user_default = kwargs.pop('default', lambda x: x)

    def _unproxy_then_encode(obj):
        # Strip the thread-local proxy wrapper before handing the value to
        # the caller-supplied ``default`` handler (identity when none given).
        return user_default(ThreadLocalProxy.unproxy(obj))

    return impl.pack(o, stream, default=_unproxy_then_encode, **kwargs)
def packb(o, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.packb and ensures that the passed object is unwrapped if it
is a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
orig_enc_func = kwargs.pop('default', lambda x: x)
def _enc_func(obj):
obj = ThreadLocalProxy.unproxy(obj)
return orig_enc_func(obj)
return msgpack_module.packb(o, default=_enc_func, **kwargs)
def unpackb(packed, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
return msgpack_module.unpackb(packed, **kwargs)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
|
saltstack/salt
|
salt/utils/msgpack.py
|
unpackb
|
python
|
def unpackb(packed, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
return msgpack_module.unpackb(packed, **kwargs)
|
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/msgpack.py#L76-L87
| null |
# -*- coding: utf-8 -*-
'''
Functions to work with MessagePack
'''
from __future__ import absolute_import
# Import Python libs
try:
# Attempt to import msgpack
import msgpack
except ImportError:
# Fall back to msgpack_pure
import msgpack_pure as msgpack # pylint: disable=import-error
# Import Salt libs
from salt.utils.thread_local_proxy import ThreadLocalProxy
def pack(o, stream, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
orig_enc_func = kwargs.pop('default', lambda x: x)
def _enc_func(obj):
obj = ThreadLocalProxy.unproxy(obj)
return orig_enc_func(obj)
return msgpack_module.pack(o, stream, default=_enc_func, **kwargs)
def packb(o, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.packb and ensures that the passed object is unwrapped if it
is a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
orig_enc_func = kwargs.pop('default', lambda x: x)
def _enc_func(obj):
obj = ThreadLocalProxy.unproxy(obj)
return orig_enc_func(obj)
return msgpack_module.packb(o, default=_enc_func, **kwargs)
def unpack(stream, **kwargs):
'''
.. versionadded:: 2018.3.4
Wraps msgpack.unpack.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
'''
msgpack_module = kwargs.pop('_msgpack_module', msgpack)
return msgpack_module.unpack(stream, **kwargs)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
|
saltstack/salt
|
salt/thorium/key.py
|
timeout
|
python
|
def timeout(name, delete=0, reject=0):
    '''
    If any minion's status is older than the timeout value then apply the
    given action to the timed out key. This example will remove keys to
    minions that have not checked in for 300 seconds (5 minutes)

    USAGE:

    .. code-block:: yaml

        statreg:
          status.reg

        clean_keys:
          key.timeout:
            - require:
              - status: statreg
            - delete: 300
    '''
    # Standard Thorium state return structure; note that ``changes`` and
    # ``comment`` are never populated by this implementation.
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}
    now = time.time()
    # __context__ slot remembering when each *silent* minion was first seen,
    # so minions that never report time out relative to that first sighting.
    ktr = 'key_start_tracker'
    if ktr not in __context__:
        __context__[ktr] = {}
    remove = set()
    reject_set = set()
    keyapi = _get_key_api()
    # Only currently accepted keys ('acc') are candidates for delete/reject.
    current = keyapi.list_status('acc')
    for id_ in current.get('minions', []):
        if id_ in __reg__['status']['val']:
            # minion is reporting, check timeout and mark for removal
            if delete and (now - __reg__['status']['val'][id_]['recv_time']) > delete:
                remove.add(id_)
            if reject and (now - __reg__['status']['val'][id_]['recv_time']) > reject:
                reject_set.add(id_)
        else:
            # No report from minion recorded, mark for change if thorium has
            # been running for longer than the timeout
            if id_ not in __context__[ktr]:
                __context__[ktr][id_] = now
            else:
                if delete and (now - __context__[ktr][id_]) > delete:
                    remove.add(id_)
                if reject and (now - __context__[ktr][id_]) > reject:
                    reject_set.add(id_)
    for id_ in remove:
        # Drop the key and forget any cached status/tracking for the minion.
        keyapi.delete_key(id_)
        __reg__['status']['val'].pop(id_, None)
        __context__[ktr].pop(id_, None)
    for id_ in reject_set:
        # Reject the key; a minion in both sets is deleted first, then the
        # reject call runs against the already-removed id.
        keyapi.reject(id_)
        __reg__['status']['val'].pop(id_, None)
        __context__[ktr].pop(id_, None)
    return ret
|
If any minion's status is older than the timeout value then apply the
given action to the timed out key. This example will remove keys to
minions that have not checked in for 300 seconds (5 minutes)
USAGE:
.. code-block:: yaml
statreg:
status.reg
clean_keys:
key.timeout:
- require:
- status: statreg
- delete: 300
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/thorium/key.py#L24-L80
|
[
"def _get_key_api():\n '''\n Return the key api hook\n '''\n if 'keyapi' not in __context__:\n __context__['keyapi'] = salt.key.Key(__opts__)\n return __context__['keyapi']\n"
] |
# -*- coding: utf-8 -*-
'''
The key Thorium State is used to apply changes to the accepted/rejected/pending keys
.. versionadded:: 2016.11.0
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import time
# Import salt libs
import salt.key
def _get_key_api():
    '''
    Return the cached :class:`salt.key.Key` API wrapper, constructing and
    caching it in ``__context__`` on first use.
    '''
    try:
        return __context__['keyapi']
    except KeyError:
        keyapi = salt.key.Key(__opts__)
        __context__['keyapi'] = keyapi
        return keyapi
|
saltstack/salt
|
salt/beacons/inotify.py
|
_get_notifier
|
python
|
def _get_notifier(config):
    '''
    Return the cached pyinotify Notifier, building it (together with the
    shared event queue) on first use.
    '''
    if 'inotify.notifier' in __context__:
        return __context__['inotify.notifier']
    # First call: create the queue the _enqueue callback appends to, then
    # the notifier itself.
    __context__['inotify.queue'] = collections.deque()
    notifier = pyinotify.Notifier(pyinotify.WatchManager(), _enqueue)
    __context__['inotify.notifier'] = notifier
    # Event coalescing is opt-in and honoured only for a literal boolean.
    if 'coalesce' in config and isinstance(config['coalesce'], bool) and config['coalesce']:
        notifier.coalesce_events()
    return notifier
|
Check the context for the notifier and construct it if not present
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/inotify.py#L70-L82
| null |
# -*- coding: utf-8 -*-
'''
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself. To mitigate this behavior, consider
setting the `disable_during_state_run` flag to `True` in
the beacon configuration.
:note: The `inotify` beacon only works on OSes that have `inotify`
kernel support.
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import collections
import fnmatch
import logging
import os
import re
# Import salt libs
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
# Import third party libs
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
MASKS = {}
for var in dir(pyinotify):
if var.startswith('IN_'):
key = var[3:].lower()
MASKS[key] = getattr(pyinotify, var)
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = 'inotify'
log = logging.getLogger(__name__)
def __virtual__():
    # Load this beacon only when the pyinotify bindings imported successfully.
    return __virtualname__ if HAS_PYINOTIFY else False
def _get_mask(mask):
    '''
    Translate a mask name into its pyinotify integer constant (0 if the name
    is unknown).
    '''
    if mask in MASKS:
        return MASKS[mask]
    return 0
def _enqueue(revent):
    '''
    Append a raw pyinotify event to the module-wide queue; used as the
    callback handed to ``pyinotify.Notifier`` so the beacon can drain events
    later.
    '''
    __context__['inotify.queue'].append(revent)
def validate(config):
    '''
    Validate the beacon configuration
    '''
    valid_mask_options = [
        'access',
        'attrib',
        'close_nowrite',
        'close_write',
        'create',
        'delete',
        'delete_self',
        'excl_unlink',
        'ignored',
        'modify',
        'moved_from',
        'moved_to',
        'move_self',
        'oneshot',
        'onlydir',
        'open',
        'unmount',
    ]
    # Configuration for inotify beacon should be a dict of dicts
    if not isinstance(config, list):
        return False, 'Configuration for inotify beacon must be a list.'
    _config = {}
    for item in config:
        _config.update(item)
    if 'files' not in _config:
        return False, 'Configuration for inotify beacon must include files.'
    for path in _config.get('files'):
        per_path = _config['files'][path]
        if not isinstance(per_path, dict):
            return False, ('Configuration for inotify beacon must '
                           'be a list of dictionaries.')
        if not any(key in per_path for key in ('mask', 'recurse', 'auto_add')):
            return False, ('Configuration for inotify beacon must '
                           'contain mask, recurse or auto_add items.')
        if 'auto_add' in per_path and not isinstance(per_path['auto_add'], bool):
            return False, ('Configuration for inotify beacon '
                           'auto_add must be boolean.')
        if 'recurse' in per_path and not isinstance(per_path['recurse'], bool):
            return False, ('Configuration for inotify beacon '
                           'recurse must be boolean.')
        if 'mask' in per_path:
            if not isinstance(per_path['mask'], list):
                return False, ('Configuration for inotify beacon '
                               'mask must be list.')
            for mask in per_path['mask']:
                if mask not in valid_mask_options:
                    return False, ('Configuration for inotify beacon '
                                   'invalid mask option {0}.'.format(mask))
    return True, 'Valid beacon configuration'
def beacon(config):
    '''
    Watch the configured files

    Example Config

    .. code-block:: yaml

        beacons:
          inotify:
            - files:
                /path/to/file/or/dir:
                  mask:
                    - open
                    - create
                    - close_write
                  recurse: True
                  auto_add: True
                  exclude:
                    - /path/to/file/or/dir/exclude1
                    - /path/to/file/or/dir/exclude2
                    - /path/to/file/or/dir/regex[a-m]*$:
                        regex: True
            - coalesce: True

    The mask list can contain the following events (the default mask is create,
    delete, and modify):

    * access - File accessed
    * attrib - File metadata changed
    * close_nowrite - Unwritable file closed
    * close_write - Writable file closed
    * create - File created in watched directory
    * delete - File deleted from watched directory
    * delete_self - Watched file or directory deleted
    * modify - File modified
    * moved_from - File moved out of watched directory
    * moved_to - File moved into watched directory
    * move_self - Watched file moved
    * open - File opened

    The mask can also contain the following options:

    * dont_follow - Don't dereference symbolic links
    * excl_unlink - Omit events for children after they have been unlinked
    * oneshot - Remove watch after one event
    * onlydir - Operate only if name is directory

    recurse:
      Recursively watch files in the directory
    auto_add:
      Automatically start watching files that are created in the watched directory
    exclude:
      Exclude directories or files from triggering events in the watched directory.
      Can use regex if regex is set to True

    coalesce:
      If this coalescing option is enabled, events are filtered based on
      their unicity, only unique events are enqueued, doublons are discarded.
      An event is unique when the combination of its fields (wd, mask,
      cookie, name) is unique among events of a same batch. After a batch of
      events is processed any events are accepted again.
      This option is top-level (at the same level as the path) and therefore
      affects all paths that are being watched. This is due to this option
      being at the Notifier level in pyinotify.
    '''
    # ``config`` is a list of single-key dicts; flatten it into one dict.
    _config = {}
    list(map(_config.update, config))

    ret = []
    notifier = _get_notifier(_config)
    wm = notifier._watch_manager

    # Read in existing events
    if notifier.check_events(1):
        notifier.read_events()
        notifier.process_events()
        queue = __context__['inotify.queue']
        while queue:
            event = queue.popleft()

            _append = True
            # Find the matching path in config by walking up the directory
            # tree from the event's own path.
            # NOTE(review): if no ancestor of event.path is configured, the
            # walk stops at '/' and the lookup below raises KeyError —
            # presumably every event originates from a configured watch;
            # confirm.
            path = event.path
            while path != '/':
                if path in _config.get('files', {}):
                    break
                path = os.path.dirname(path)

            excludes = _config['files'][path].get('exclude', '')

            if excludes and isinstance(excludes, list):
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        # Dict form: {pattern: {'regex': bool}}
                        _exclude = next(iter(exclude))
                        if exclude[_exclude].get('regex', False):
                            try:
                                if re.search(_exclude, event.pathname):
                                    _append = False
                            except Exception:
                                log.warning('Failed to compile regex: %s',
                                            _exclude)
                        else:
                            # Not flagged as regex; fall through to the
                            # plain-string handling below using the dict key.
                            exclude = _exclude
                    elif '*' in exclude:
                        # Shell-style glob exclusion.
                        if fnmatch.fnmatch(event.pathname, exclude):
                            _append = False
                    else:
                        # Plain path-prefix exclusion.
                        if event.pathname.startswith(exclude):
                            _append = False

            if _append:
                sub = {'tag': event.path,
                       'path': event.pathname,
                       'change': event.maskname}
                ret.append(sub)
            else:
                log.info('Excluding %s from event for %s', event.pathname, path)

    # Get paths currently being watched
    current = set()
    for wd in wm.watches:
        current.add(wm.watches[wd].path)

    # Update existing watches and add new ones
    # TODO: make the config handle more options
    for path in _config.get('files', ()):

        if isinstance(_config['files'][path], dict):
            mask = _config['files'][path].get('mask', DEFAULT_MASK)
            if isinstance(mask, list):
                # OR the named events together into a single bitmask.
                r_mask = 0
                for sub in mask:
                    r_mask |= _get_mask(sub)
            elif isinstance(mask, salt.ext.six.binary_type):
                r_mask = _get_mask(mask)
            else:
                r_mask = mask
            mask = r_mask
            rec = _config['files'][path].get('recurse', False)
            auto_add = _config['files'][path].get('auto_add', False)
        else:
            # No per-path options configured; fall back to defaults.
            mask = DEFAULT_MASK
            rec = False
            auto_add = False

        if path in current:
            # Already watched: push updated options to the existing watch.
            # NOTE(review): a change of 'rec' alone does not trigger an
            # update — only mask/auto_add differences do; confirm intended.
            for wd in wm.watches:
                if path == wm.watches[wd].path:
                    update = False
                    if wm.watches[wd].mask != mask:
                        update = True
                    if wm.watches[wd].auto_add != auto_add:
                        update = True
                    if update:
                        wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
        elif os.path.exists(path):
            # New path: translate the exclude list (plain strings or
            # {pattern: opts} dicts) into a pyinotify ExcludeFilter.
            excludes = _config['files'][path].get('exclude', '')
            excl = None
            if isinstance(excludes, list):
                excl = []
                for exclude in excludes:
                    if isinstance(exclude, dict):
                        excl.append(list(exclude)[0])
                    else:
                        excl.append(exclude)
                excl = pyinotify.ExcludeFilter(excl)

            wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)

    # Return event data
    return ret
def close(config):
    '''
    Stop and discard the cached notifier, if one was ever created.
    '''
    if 'inotify.notifier' not in __context__:
        return
    __context__['inotify.notifier'].stop()
    del __context__['inotify.notifier']
|
saltstack/salt
|
salt/beacons/inotify.py
|
validate
|
python
|
def validate(config):
'''
Validate the beacon configuration
'''
VALID_MASK = [
'access',
'attrib',
'close_nowrite',
'close_write',
'create',
'delete',
'delete_self',
'excl_unlink',
'ignored',
'modify',
'moved_from',
'moved_to',
'move_self',
'oneshot',
'onlydir',
'open',
'unmount'
]
# Configuration for inotify beacon should be a dict of dicts
if not isinstance(config, list):
return False, 'Configuration for inotify beacon must be a list.'
else:
_config = {}
list(map(_config.update, config))
if 'files' not in _config:
return False, 'Configuration for inotify beacon must include files.'
else:
for path in _config.get('files'):
if not isinstance(_config['files'][path], dict):
return False, ('Configuration for inotify beacon must '
'be a list of dictionaries.')
else:
if not any(j in ['mask',
'recurse',
'auto_add'] for j in _config['files'][path]):
return False, ('Configuration for inotify beacon must '
'contain mask, recurse or auto_add items.')
if 'auto_add' in _config['files'][path]:
if not isinstance(_config['files'][path]['auto_add'], bool):
return False, ('Configuration for inotify beacon '
'auto_add must be boolean.')
if 'recurse' in _config['files'][path]:
if not isinstance(_config['files'][path]['recurse'], bool):
return False, ('Configuration for inotify beacon '
'recurse must be boolean.')
if 'mask' in _config['files'][path]:
if not isinstance(_config['files'][path]['mask'], list):
return False, ('Configuration for inotify beacon '
'mask must be list.')
for mask in _config['files'][path]['mask']:
if mask not in VALID_MASK:
return False, ('Configuration for inotify beacon '
'invalid mask option {0}.'.format(mask))
return True, 'Valid beacon configuration'
|
Validate the beacon configuration
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/inotify.py#L85-L150
| null |
# -*- coding: utf-8 -*-
'''
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself. To mitigate this behavior, consider
setting the `disable_during_state_run` flag to `True` in
the beacon configuration.
:note: The `inotify` beacon only works on OSes that have `inotify`
kernel support.
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import collections
import fnmatch
import logging
import os
import re
# Import salt libs
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
# Import third party libs
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
MASKS = {}
for var in dir(pyinotify):
if var.startswith('IN_'):
key = var[3:].lower()
MASKS[key] = getattr(pyinotify, var)
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = 'inotify'
log = logging.getLogger(__name__)
def __virtual__():
if HAS_PYINOTIFY:
return __virtualname__
return False
def _get_mask(mask):
'''
Return the int that represents the mask
'''
return MASKS.get(mask, 0)
def _enqueue(revent):
'''
Enqueue the event
'''
__context__['inotify.queue'].append(revent)
def _get_notifier(config):
'''
Check the context for the notifier and construct it if not present
'''
if 'inotify.notifier' not in __context__:
__context__['inotify.queue'] = collections.deque()
wm = pyinotify.WatchManager()
__context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue)
if ('coalesce' in config and
isinstance(config['coalesce'], bool) and
config['coalesce']):
__context__['inotify.notifier'].coalesce_events()
return __context__['inotify.notifier']
def beacon(config):
'''
Watch the configured files
Example Config
.. code-block:: yaml
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
The mask list can contain the following events (the default mask is create,
delete, and modify):
* access - File accessed
* attrib - File metadata changed
* close_nowrite - Unwritable file closed
* close_write - Writable file closed
* create - File created in watched directory
* delete - File deleted from watched directory
* delete_self - Watched file or directory deleted
* modify - File modified
* moved_from - File moved out of watched directory
* moved_to - File moved into watched directory
* move_self - Watched file moved
* open - File opened
The mask can also contain the following options:
* dont_follow - Don't dereference symbolic links
* excl_unlink - Omit events for children after they have been unlinked
* oneshot - Remove watch after one event
* onlydir - Operate only if name is directory
recurse:
Recursively watch files in the directory
auto_add:
Automatically start watching files that are created in the watched directory
exclude:
Exclude directories or files from triggering events in the watched directory.
Can use regex if regex is set to True
coalesce:
If this coalescing option is enabled, events are filtered based on
their unicity, only unique events are enqueued, doublons are discarded.
An event is unique when the combination of its fields (wd, mask,
cookie, name) is unique among events of a same batch. After a batch of
events is processed any events are accepted again.
This option is top-level (at the same level as the path) and therefore
affects all paths that are being watched. This is due to this option
being at the Notifier level in pyinotify.
'''
_config = {}
list(map(_config.update, config))
ret = []
notifier = _get_notifier(_config)
wm = notifier._watch_manager
# Read in existing events
if notifier.check_events(1):
notifier.read_events()
notifier.process_events()
queue = __context__['inotify.queue']
while queue:
event = queue.popleft()
_append = True
# Find the matching path in config
path = event.path
while path != '/':
if path in _config.get('files', {}):
break
path = os.path.dirname(path)
excludes = _config['files'][path].get('exclude', '')
if excludes and isinstance(excludes, list):
for exclude in excludes:
if isinstance(exclude, dict):
_exclude = next(iter(exclude))
if exclude[_exclude].get('regex', False):
try:
if re.search(_exclude, event.pathname):
_append = False
except Exception:
log.warning('Failed to compile regex: %s',
_exclude)
else:
exclude = _exclude
elif '*' in exclude:
if fnmatch.fnmatch(event.pathname, exclude):
_append = False
else:
if event.pathname.startswith(exclude):
_append = False
if _append:
sub = {'tag': event.path,
'path': event.pathname,
'change': event.maskname}
ret.append(sub)
else:
log.info('Excluding %s from event for %s', event.pathname, path)
# Get paths currently being watched
current = set()
for wd in wm.watches:
current.add(wm.watches[wd].path)
# Update existing watches and add new ones
# TODO: make the config handle more options
for path in _config.get('files', ()):
if isinstance(_config['files'][path], dict):
mask = _config['files'][path].get('mask', DEFAULT_MASK)
if isinstance(mask, list):
r_mask = 0
for sub in mask:
r_mask |= _get_mask(sub)
elif isinstance(mask, salt.ext.six.binary_type):
r_mask = _get_mask(mask)
else:
r_mask = mask
mask = r_mask
rec = _config['files'][path].get('recurse', False)
auto_add = _config['files'][path].get('auto_add', False)
else:
mask = DEFAULT_MASK
rec = False
auto_add = False
if path in current:
for wd in wm.watches:
if path == wm.watches[wd].path:
update = False
if wm.watches[wd].mask != mask:
update = True
if wm.watches[wd].auto_add != auto_add:
update = True
if update:
wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
elif os.path.exists(path):
excludes = _config['files'][path].get('exclude', '')
excl = None
if isinstance(excludes, list):
excl = []
for exclude in excludes:
if isinstance(exclude, dict):
excl.append(list(exclude)[0])
else:
excl.append(exclude)
excl = pyinotify.ExcludeFilter(excl)
wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)
# Return event data
return ret
def close(config):
if 'inotify.notifier' in __context__:
__context__['inotify.notifier'].stop()
del __context__['inotify.notifier']
|
saltstack/salt
|
salt/beacons/inotify.py
|
beacon
|
python
|
def beacon(config):
'''
Watch the configured files
Example Config
.. code-block:: yaml
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
The mask list can contain the following events (the default mask is create,
delete, and modify):
* access - File accessed
* attrib - File metadata changed
* close_nowrite - Unwritable file closed
* close_write - Writable file closed
* create - File created in watched directory
* delete - File deleted from watched directory
* delete_self - Watched file or directory deleted
* modify - File modified
* moved_from - File moved out of watched directory
* moved_to - File moved into watched directory
* move_self - Watched file moved
* open - File opened
The mask can also contain the following options:
* dont_follow - Don't dereference symbolic links
* excl_unlink - Omit events for children after they have been unlinked
* oneshot - Remove watch after one event
* onlydir - Operate only if name is directory
recurse:
Recursively watch files in the directory
auto_add:
Automatically start watching files that are created in the watched directory
exclude:
Exclude directories or files from triggering events in the watched directory.
Can use regex if regex is set to True
coalesce:
If this coalescing option is enabled, events are filtered based on
their unicity, only unique events are enqueued, doublons are discarded.
An event is unique when the combination of its fields (wd, mask,
cookie, name) is unique among events of a same batch. After a batch of
events is processed any events are accepted again.
This option is top-level (at the same level as the path) and therefore
affects all paths that are being watched. This is due to this option
being at the Notifier level in pyinotify.
'''
_config = {}
list(map(_config.update, config))
ret = []
notifier = _get_notifier(_config)
wm = notifier._watch_manager
# Read in existing events
if notifier.check_events(1):
notifier.read_events()
notifier.process_events()
queue = __context__['inotify.queue']
while queue:
event = queue.popleft()
_append = True
# Find the matching path in config
path = event.path
while path != '/':
if path in _config.get('files', {}):
break
path = os.path.dirname(path)
excludes = _config['files'][path].get('exclude', '')
if excludes and isinstance(excludes, list):
for exclude in excludes:
if isinstance(exclude, dict):
_exclude = next(iter(exclude))
if exclude[_exclude].get('regex', False):
try:
if re.search(_exclude, event.pathname):
_append = False
except Exception:
log.warning('Failed to compile regex: %s',
_exclude)
else:
exclude = _exclude
elif '*' in exclude:
if fnmatch.fnmatch(event.pathname, exclude):
_append = False
else:
if event.pathname.startswith(exclude):
_append = False
if _append:
sub = {'tag': event.path,
'path': event.pathname,
'change': event.maskname}
ret.append(sub)
else:
log.info('Excluding %s from event for %s', event.pathname, path)
# Get paths currently being watched
current = set()
for wd in wm.watches:
current.add(wm.watches[wd].path)
# Update existing watches and add new ones
# TODO: make the config handle more options
for path in _config.get('files', ()):
if isinstance(_config['files'][path], dict):
mask = _config['files'][path].get('mask', DEFAULT_MASK)
if isinstance(mask, list):
r_mask = 0
for sub in mask:
r_mask |= _get_mask(sub)
elif isinstance(mask, salt.ext.six.binary_type):
r_mask = _get_mask(mask)
else:
r_mask = mask
mask = r_mask
rec = _config['files'][path].get('recurse', False)
auto_add = _config['files'][path].get('auto_add', False)
else:
mask = DEFAULT_MASK
rec = False
auto_add = False
if path in current:
for wd in wm.watches:
if path == wm.watches[wd].path:
update = False
if wm.watches[wd].mask != mask:
update = True
if wm.watches[wd].auto_add != auto_add:
update = True
if update:
wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
elif os.path.exists(path):
excludes = _config['files'][path].get('exclude', '')
excl = None
if isinstance(excludes, list):
excl = []
for exclude in excludes:
if isinstance(exclude, dict):
excl.append(list(exclude)[0])
else:
excl.append(exclude)
excl = pyinotify.ExcludeFilter(excl)
wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)
# Return event data
return ret
|
Watch the configured files
Example Config
.. code-block:: yaml
beacons:
inotify:
- files:
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[a-m]*$:
regex: True
- coalesce: True
The mask list can contain the following events (the default mask is create,
delete, and modify):
* access - File accessed
* attrib - File metadata changed
* close_nowrite - Unwritable file closed
* close_write - Writable file closed
* create - File created in watched directory
* delete - File deleted from watched directory
* delete_self - Watched file or directory deleted
* modify - File modified
* moved_from - File moved out of watched directory
* moved_to - File moved into watched directory
* move_self - Watched file moved
* open - File opened
The mask can also contain the following options:
* dont_follow - Don't dereference symbolic links
* excl_unlink - Omit events for children after they have been unlinked
* oneshot - Remove watch after one event
* onlydir - Operate only if name is directory
recurse:
Recursively watch files in the directory
auto_add:
Automatically start watching files that are created in the watched directory
exclude:
Exclude directories or files from triggering events in the watched directory.
Can use regex if regex is set to True
coalesce:
If this coalescing option is enabled, events are filtered based on
their unicity, only unique events are enqueued, doublons are discarded.
An event is unique when the combination of its fields (wd, mask,
cookie, name) is unique among events of a same batch. After a batch of
events is processed any events are accepted again.
This option is top-level (at the same level as the path) and therefore
affects all paths that are being watched. This is due to this option
being at the Notifier level in pyinotify.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/inotify.py#L153-L323
|
[
"def _get_mask(mask):\n '''\n Return the int that represents the mask\n '''\n return MASKS.get(mask, 0)\n",
"def _get_notifier(config):\n '''\n Check the context for the notifier and construct it if not present\n '''\n if 'inotify.notifier' not in __context__:\n __context__['inotify.queue'] = collections.deque()\n wm = pyinotify.WatchManager()\n __context__['inotify.notifier'] = pyinotify.Notifier(wm, _enqueue)\n if ('coalesce' in config and\n isinstance(config['coalesce'], bool) and\n config['coalesce']):\n __context__['inotify.notifier'].coalesce_events()\n return __context__['inotify.notifier']\n"
] |
# -*- coding: utf-8 -*-
'''
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself. To mitigate this behavior, consider
setting the `disable_during_state_run` flag to `True` in
the beacon configuration.
:note: The `inotify` beacon only works on OSes that have `inotify`
kernel support.
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import collections
import fnmatch
import logging
import os
import re
# Import salt libs
import salt.ext.six
# pylint: disable=import-error
from salt.ext.six.moves import map
# pylint: enable=import-error
# Import third party libs
# Optional third-party dependency: the beacon only loads when pyinotify is
# importable (see __virtual__ below).
try:
    import pyinotify
    HAS_PYINOTIFY = True
    # Default watch mask: file creation, deletion and modification.
    DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
    MASKS = {}
    # Map human-friendly mask names ('create', 'delete', ...) to the
    # corresponding pyinotify IN_* integer constants.
    for var in dir(pyinotify):
        if var.startswith('IN_'):
            key = var[3:].lower()
            MASKS[key] = getattr(pyinotify, var)
except ImportError:
    HAS_PYINOTIFY = False
    DEFAULT_MASK = None
__virtualname__ = 'inotify'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this beacon when the pyinotify dependency imported cleanly.
    '''
    if not HAS_PYINOTIFY:
        return False
    return __virtualname__
def _get_mask(mask):
    '''
    Translate a symbolic mask name into its pyinotify integer value.
    Unknown names map to 0.
    '''
    try:
        return MASKS[mask]
    except KeyError:
        return 0
def _enqueue(revent):
    '''
    Callback handed to pyinotify: push the raw event onto the shared queue.
    '''
    queue = __context__['inotify.queue']
    queue.append(revent)
def _get_notifier(config):
    '''
    Return the notifier cached in ``__context__``, constructing it (along
    with the event queue) on first use.
    '''
    if 'inotify.notifier' not in __context__:
        __context__['inotify.queue'] = collections.deque()
        manager = pyinotify.WatchManager()
        notifier = pyinotify.Notifier(manager, _enqueue)
        __context__['inotify.notifier'] = notifier
        # Coalescing is opt-in and must be an explicit boolean True.
        wants_coalesce = ('coalesce' in config
                          and isinstance(config['coalesce'], bool)
                          and config['coalesce'])
        if wants_coalesce:
            notifier.coalesce_events()
    return __context__['inotify.notifier']
def validate(config):
    '''
    Validate the beacon configuration.

    :param config: list
        Beacon configuration: a list of single-key dicts that are merged into
        one mapping, which must contain a ``files`` dict keyed by path.
    :return: tuple of (bool, str)
        Validity flag and a human-readable reason.
    '''
    VALID_MASK = [
        'access',
        'attrib',
        'close_nowrite',
        'close_write',
        'create',
        'delete',
        'delete_self',
        'excl_unlink',
        'ignored',
        'modify',
        'moved_from',
        'moved_to',
        'move_self',
        'oneshot',
        'onlydir',
        'open',
        'unmount'
    ]

    # Configuration for inotify beacon should be a dict of dicts
    if not isinstance(config, list):
        return False, 'Configuration for inotify beacon must be a list.'

    _config = {}
    list(map(_config.update, config))

    if 'files' not in _config:
        return False, 'Configuration for inotify beacon must include files.'

    files = _config['files']
    # Guard against e.g. a plain list of paths, which previously raised a
    # TypeError when indexed by path below instead of failing validation.
    if not isinstance(files, dict):
        return False, ('Configuration for inotify beacon files must '
                       'be a dictionary of paths.')

    for path in files:
        path_config = files[path]
        if not isinstance(path_config, dict):
            return False, ('Configuration for inotify beacon must '
                           'be a list of dictionaries.')
        # Each path entry must carry at least one recognised option.
        if not any(key in path_config for key in ('mask', 'recurse', 'auto_add')):
            return False, ('Configuration for inotify beacon must '
                           'contain mask, recurse or auto_add items.')
        if 'auto_add' in path_config and not isinstance(path_config['auto_add'], bool):
            return False, ('Configuration for inotify beacon '
                           'auto_add must be boolean.')
        if 'recurse' in path_config and not isinstance(path_config['recurse'], bool):
            return False, ('Configuration for inotify beacon '
                           'recurse must be boolean.')
        if 'mask' in path_config:
            if not isinstance(path_config['mask'], list):
                return False, ('Configuration for inotify beacon '
                               'mask must be list.')
            for mask in path_config['mask']:
                if mask not in VALID_MASK:
                    return False, ('Configuration for inotify beacon '
                                   'invalid mask option {0}.'.format(mask))

    return True, 'Valid beacon configuration'
def close(config):
    '''
    Stop and discard the cached notifier; a no-op when none was created.
    '''
    if 'inotify.notifier' in __context__:
        __context__['inotify.notifier'].stop()
        __context__.pop('inotify.notifier')
|
saltstack/salt
|
salt/modules/gem.py
|
_gem
|
python
|
def _gem(command, ruby=None, runas=None, gem_bin=None):
    '''
    Execute a gem command, dispatching through rvm or rbenv when available.
    rbenv is not available on Windows, so it is not attempted there.

    :param command: list
        Arguments to pass to the gem binary.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.
    :param gem_bin: string : None
        Full path to the ``gem`` binary.

    :return:
        The full standard output on success.
    :raises CommandExecutionError: when the direct gem invocation fails.
    '''
    cmdline = [gem_bin or 'gem'] + command

    if gem_bin is None:
        # No explicit binary given: the user has no strong preference, so
        # prefer rvm, then rbenv (skipped on Windows).
        if __salt__['rvm.is_installed'](runas=runas):
            return __salt__['rvm.do'](ruby, cmdline, runas=runas)
        on_windows = salt.utils.platform.is_windows()
        if not on_windows and __salt__['rbenv.is_installed'](runas=runas):
            if ruby is None:
                return __salt__['rbenv.do'](cmdline, runas=runas)
            return __salt__['rbenv.do_with_ruby'](ruby, cmdline, runas=runas)

    result = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result['stdout']
|
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L23-L64
| null |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def install(gems,  # pylint: disable=C0103
            ruby=None,
            gem_bin=None,
            runas=None,
            version=None,
            rdoc=False,
            ri=False,
            pre_releases=False,
            proxy=None,
            source=None):  # pylint: disable=C0103
    '''
    Install one or more gems.

    :param gems: string
        Whitespace-separated gem name(s) to install.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.
    :param version: string : None
        Install this specific version of the gem.
        Doesn't play nice with multiple gems at once
    :param rdoc: boolean : False
        Generate RDoc documentation for the gem(s).
        For rubygems > 3 this is interpreted as the --no-document arg and the
        ri option will then be ignored
    :param ri: boolean : False
        Generate RI documentation for the gem(s).
        For rubygems > 3 this is interpreted as the --no-document arg and the
        rdoc option will then be ignored
    :param pre_releases: boolean : False
        Also consider pre-release versions.
    :param proxy: string : None
        Use the specified HTTP proxy server for all outgoing traffic.
        Format: http://hostname[:port]
    source : None
        Use the specified HTTP gem source server to download gem.
        Format: http://hostname[:port]

    CLI Example:

    .. code-block:: bash

        salt '*' gem.install vagrant
        salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
    '''
    if hasattr(gems, 'split'):
        gems = gems.split()

    options = []
    if version:
        options += ['--version', version]
    if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
        # rubygems 3 dropped --no-rdoc/--no-ri in favour of --no-document.
        if not (rdoc and ri):
            options.append('--no-document')
        if pre_releases:
            options.append('--prerelease')
    else:
        if not rdoc:
            options.append('--no-rdoc')
        if not ri:
            options.append('--no-ri')
        if pre_releases:
            options.append('--pre')
    if proxy:
        options += ['-p', proxy]
    if source:
        options += ['--source', source]

    return _gem(['install'] + gems + options,
                ruby,
                gem_bin=gem_bin,
                runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
    '''
    Uninstall one or more gems.

    :param gems: string
        Whitespace-separated gem name(s) to uninstall.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.uninstall vagrant
    '''
    if hasattr(gems, 'split'):
        gems = gems.split()
    # -a removes all matching versions, -x also removes executables.
    cmd = ['uninstall'] + gems + ['-a', '-x']
    return _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
    '''
    Update one or more gems.

    :param gems: string
        Whitespace-separated gem name(s) to update.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.update vagrant
    '''
    if hasattr(gems, 'split'):
        gems = gems.split()
    cmd = ['update'] + gems
    return _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
    '''
    Update rubygems itself.

    :param version: string : (newest)
        The version of rubygems to install.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.update_system
    '''
    cmd = ['update', '--system', version]
    return _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
    '''
    Print out the version of gem.

    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.version
    '''
    stdout = _gem(['--version'], ruby, gem_bin=gem_bin, runas=runas)
    # NOTE(review): falls through to {} when no version-like line is found,
    # although callers appear to expect a string -- confirm.
    ret = {}
    for line in salt.utils.itertools.split(stdout, '\n'):
        if re.match(r'[.0-9]+', line):
            ret = line
            break
    return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
    # True when ``gem --version`` reports a 3.x release.
    current = version(ruby=ruby, runas=runas, gem_bin=gem_bin)
    return re.match(r'^3\..*', current) is not None
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
    '''
    List locally installed gems.

    :param prefix: string :
        Only list gems when the name matches this prefix.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.list
    '''
    cmd = ['list']
    if prefix:
        cmd.append(prefix)
    stdout = _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
    installed = {}
    # Each line looks like: "name (1.0.0, 0.9.1)".
    for line in salt.utils.itertools.split(stdout, '\n'):
        found = re.match(r'^([^ ]+) \((.+)\)', line)
        if found:
            installed[found.group(1)] = found.group(2).split(', ')
    return installed
def list_upgrades(ruby=None,
                  runas=None,
                  gem_bin=None):
    '''
    .. versionadded:: 2015.8.0

    Check if an upgrade is available for installed gems.

    gem_bin : None
        Full path to ``gem`` binary to use.
    ruby : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    runas : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.list_upgrades
    '''
    output = _gem(['outdated'], ruby, gem_bin=gem_bin, runas=runas)
    upgrades = {}
    # Each line looks like: "name (1.0.0 < 1.1.0)".
    for line in salt.utils.itertools.split(output, '\n'):
        found = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
        if not found:
            log.error('Can\'t parse line \'%s\'', line)
            continue
        gem_name, newest = found.groups()
        upgrades[gem_name] = newest
    return upgrades
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
    '''
    Add a gem source.

    :param source_uri: string
        The source URI to add.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.sources_add http://rubygems.org/
    '''
    cmd = ['sources', '--add', source_uri]
    return _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
    '''
    Remove a gem source.

    :param source_uri: string
        The source URI to remove.
    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.sources_remove http://rubygems.org/
    '''
    cmd = ['sources', '--remove', source_uri]
    return _gem(cmd, ruby, gem_bin=gem_bin, runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
    '''
    List the configured gem sources.

    :param gem_bin: string : None
        Full path to ``gem`` binary to use.
    :param ruby: string : None
        If RVM or rbenv are installed, the ruby version and gemset to use.
        Ignored if ``gem_bin`` is specified.
    :param runas: string : None
        The user to run gem as.

    CLI Example:

    .. code-block:: bash

        salt '*' gem.sources_list
    '''
    output = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
    if output is False:
        return []
    # Skip the first two lines -- presumably the "*** CURRENT SOURCES ***"
    # header emitted by the gem CLI; confirm against actual output.
    return output.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
install
|
python
|
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
|
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L67-L146
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n",
"def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):\n match = re.match(r'^3\\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))\n if match:\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
uninstall
|
python
|
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
|
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L149-L177
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
update
|
python
|
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
|
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L180-L208
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
update_system
|
python
|
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
|
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L211-L234
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
version
|
python
|
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
|
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L237-L266
|
[
"def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n",
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
list_
|
python
|
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
|
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L276-L310
|
[
"def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n",
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
list_upgrades
|
python
|
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
|
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L313-L348
|
[
"def split(orig, sep=None):\n '''\n Generator function for iterating through large strings, particularly useful\n as a replacement for str.splitlines().\n\n See http://stackoverflow.com/a/3865367\n '''\n exp = re.compile(r'\\s+' if sep is None else re.escape(sep))\n pos = 0\n length = len(orig)\n while True:\n match = exp.search(orig, pos)\n if not match:\n if pos < length or sep is not None:\n val = orig[pos:]\n if val:\n # Only yield a value if the slice was not an empty string,\n # because if it is then we've reached the end. This keeps\n # us from yielding an extra blank value at the end.\n yield val\n break\n if pos < match.start() or sep is not None:\n yield orig[pos:match.start()]\n pos = match.end()\n",
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
sources_add
|
python
|
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
|
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L351-L374
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
sources_remove
|
python
|
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
|
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L377-L400
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
saltstack/salt
|
salt/modules/gem.py
|
sources_list
|
python
|
def sources_list(ruby=None, runas=None, gem_bin=None):
'''
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
'''
ret = _gem(['sources'], ruby, gem_bin=gem_bin, runas=runas)
return [] if ret is False else ret.splitlines()[2:]
|
List the configured gem sources.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_list
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/gem.py#L403-L422
|
[
"def _gem(command, ruby=None, runas=None, gem_bin=None):\n '''\n Run the actual gem command. If rvm or rbenv is installed, run the command\n using the corresponding module. rbenv is not available on windows, so don't\n try.\n\n :param command: string\n Command to run\n :param ruby: string : None\n If RVM or rbenv are installed, the ruby version and gemset to use.\n Ignored if ``gem_bin`` is specified.\n :param runas: string : None\n The user to run gem as.\n :param gem_bin: string : None\n Full path to the ``gem`` binary\n\n :return:\n Returns the full standard out including success codes or False if it fails\n '''\n cmdline = [gem_bin or 'gem'] + command\n\n # If a custom gem is given, use that and don't check for rvm/rbenv. User\n # knows best!\n if gem_bin is None:\n if __salt__['rvm.is_installed'](runas=runas):\n return __salt__['rvm.do'](ruby, cmdline, runas=runas)\n\n if not salt.utils.platform.is_windows() \\\n and __salt__['rbenv.is_installed'](runas=runas):\n if ruby is None:\n return __salt__['rbenv.do'](cmdline, runas=runas)\n else:\n return __salt__['rbenv.do_with_ruby'](ruby,\n cmdline,\n runas=runas)\n\n ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)\n\n if ret['retcode'] == 0:\n return ret['stdout']\n else:\n raise CommandExecutionError(ret['stderr'])\n"
] |
# -*- coding: utf-8 -*-
'''
Manage ruby gems.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import re
import logging
# Import Salt libs
import salt.utils.itertools
import salt.utils.platform
from salt.exceptions import CommandExecutionError
__func_alias__ = {
'list_': 'list'
}
log = logging.getLogger(__name__) # pylint: disable=C0103
def _gem(command, ruby=None, runas=None, gem_bin=None):
'''
Run the actual gem command. If rvm or rbenv is installed, run the command
using the corresponding module. rbenv is not available on windows, so don't
try.
:param command: string
Command to run
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param gem_bin: string : None
Full path to the ``gem`` binary
:return:
Returns the full standard out including success codes or False if it fails
'''
cmdline = [gem_bin or 'gem'] + command
# If a custom gem is given, use that and don't check for rvm/rbenv. User
# knows best!
if gem_bin is None:
if __salt__['rvm.is_installed'](runas=runas):
return __salt__['rvm.do'](ruby, cmdline, runas=runas)
if not salt.utils.platform.is_windows() \
and __salt__['rbenv.is_installed'](runas=runas):
if ruby is None:
return __salt__['rbenv.do'](cmdline, runas=runas)
else:
return __salt__['rbenv.do_with_ruby'](ruby,
cmdline,
runas=runas)
ret = __salt__['cmd.run_all'](cmdline, runas=runas, python_shell=False)
if ret['retcode'] == 0:
return ret['stdout']
else:
raise CommandExecutionError(ret['stderr'])
def install(gems, # pylint: disable=C0103
ruby=None,
gem_bin=None,
runas=None,
version=None,
rdoc=False,
ri=False,
pre_releases=False,
proxy=None,
source=None): # pylint: disable=C0103
'''
Installs one or several gems.
:param gems: string
The gems to install
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
:param version: string : None
Specify the version to install for the gem.
Doesn't play nice with multiple gems at once
:param rdoc: boolean : False
Generate RDoc documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
ri option will then be ignored
:param ri: boolean : False
Generate RI documentation for the gem(s).
For rubygems > 3 this is interpreted as the --no-document arg and the
rdoc option will then be ignored
:param pre_releases: boolean : False
Include pre-releases in the available versions
:param proxy: string : None
Use the specified HTTP proxy server for all outgoing traffic.
Format: http://hostname[:port]
source : None
Use the specified HTTP gem source server to download gem.
Format: http://hostname[:port]
CLI Example:
.. code-block:: bash
salt '*' gem.install vagrant
salt '*' gem.install redphone gem_bin=/opt/sensu/embedded/bin/gem
'''
try:
gems = gems.split()
except AttributeError:
pass
options = []
if version:
options.extend(['--version', version])
if _has_rubygems_3(ruby=ruby, runas=runas, gem_bin=gem_bin):
if not rdoc or not ri:
options.append('--no-document')
if pre_releases:
options.append('--prerelease')
else:
if not rdoc:
options.append('--no-rdoc')
if not ri:
options.append('--no-ri')
if pre_releases:
options.append('--pre')
if proxy:
options.extend(['-p', proxy])
if source:
options.extend(['--source', source])
return _gem(['install'] + gems + options,
ruby,
gem_bin=gem_bin,
runas=runas)
def uninstall(gems, ruby=None, runas=None, gem_bin=None):
'''
Uninstall one or several gems.
:param gems: string
The gems to uninstall.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.uninstall vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['uninstall'] + gems + ['-a', '-x'],
ruby,
gem_bin=gem_bin,
runas=runas)
def update(gems, ruby=None, runas=None, gem_bin=None):
'''
Update one or several gems.
:param gems: string
The gems to update.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update vagrant
'''
try:
gems = gems.split()
except AttributeError:
pass
return _gem(['update'] + gems,
ruby,
gem_bin=gem_bin,
runas=runas)
def update_system(version='', ruby=None, runas=None, gem_bin=None):
'''
Update rubygems.
:param version: string : (newest)
The version of rubygems to install.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.update_system
'''
return _gem(['update', '--system', version],
ruby,
gem_bin=gem_bin,
runas=runas)
def version(ruby=None, runas=None, gem_bin=None):
'''
Print out the version of gem
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.version
'''
cmd = ['--version']
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'[.0-9]+', line)
if match:
ret = line
break
return ret
def _has_rubygems_3(ruby=None, runas=None, gem_bin=None):
match = re.match(r'^3\..*', version(ruby=ruby, runas=runas, gem_bin=gem_bin))
if match:
return True
return False
def list_(prefix='', ruby=None, runas=None, gem_bin=None):
'''
List locally installed gems.
:param prefix: string :
Only list gems when the name matches this prefix.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list
'''
cmd = ['list']
if prefix:
cmd.append(prefix)
stdout = _gem(cmd,
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(stdout, '\n'):
match = re.match(r'^([^ ]+) \((.+)\)', line)
if match:
gem = match.group(1)
versions = match.group(2).split(', ')
ret[gem] = versions
return ret
def list_upgrades(ruby=None,
runas=None,
gem_bin=None):
'''
.. versionadded:: 2015.8.0
Check if an upgrade is available for installed gems
gem_bin : None
Full path to ``gem`` binary to use.
ruby : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
runas : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.list_upgrades
'''
result = _gem(['outdated'],
ruby,
gem_bin=gem_bin,
runas=runas)
ret = {}
for line in salt.utils.itertools.split(result, '\n'):
match = re.search(r'(\S+) \(\S+ < (\S+)\)', line)
if match:
name, version = match.groups()
else:
log.error('Can\'t parse line \'%s\'', line)
continue
ret[name] = version
return ret
def sources_add(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Add a gem source.
:param source_uri: string
The source URI to add.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_add http://rubygems.org/
'''
return _gem(['sources', '--add', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
def sources_remove(source_uri, ruby=None, runas=None, gem_bin=None):
'''
Remove a gem source.
:param source_uri: string
The source URI to remove.
:param gem_bin: string : None
Full path to ``gem`` binary to use.
:param ruby: string : None
If RVM or rbenv are installed, the ruby version and gemset to use.
Ignored if ``gem_bin`` is specified.
:param runas: string : None
The user to run gem as.
CLI Example:
.. code-block:: bash
salt '*' gem.sources_remove http://rubygems.org/
'''
return _gem(['sources', '--remove', source_uri],
ruby,
gem_bin=gem_bin,
runas=runas)
|
saltstack/salt
|
salt/states/icinga2.py
|
generate_ticket
|
python
|
def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
    '''
    Generate an icinga2 ticket on the master.

    name
        The domain name for which this ticket will be generated
    output
        grain: output in a grain
        other: the file to store results
        None: output to the result comment (default)
    grain:
        grain to store the output (need output=grain)
    key:
        the specified grain will be treated as a dictionary, the result
        of this state will be stored under the specified key.
    overwrite:
        The file or grain will be overwritten if it already exists (default)
    '''
    # Standard Salt state return structure.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Checking if execution is needed.
    if output == 'grain':
        if grain and not key:
            # The whole grain holds the ticket: skip when it already exists
            # and overwrite is disabled; honour test (dry-run) mode.
            if not overwrite and grain in __salt__['grains.ls']():
                ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
                return ret
        elif grain:
            # The grain is treated as a dict; the ticket will live under
            # ``key``. Fetch the current value (or start empty) so the
            # existing-key check below can run.
            if grain in __salt__['grains.ls']():
                grain_value = __salt__['grains.get'](grain)
            else:
                grain_value = {}
            if not overwrite and key in grain_value:
                ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
                return ret
        else:
            # output == 'grain' without a grain name is a usage error.
            ret['result'] = False
            ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
            return ret
    elif output:
        # ``output`` is a file path.
        if not overwrite and os.path.isfile(output):
            ret['comment'] = 'No execution needed. File {0} already set'.format(output)
            return ret
        elif __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
            return ret
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Ticket generation would be executed, not storing result'
        return ret
    # Executing the command.
    ticket_res = __salt__['icinga2.generate_ticket'](name)
    ticket = ticket_res['stdout']
    # NOTE(review): on a non-zero retcode nothing below runs and the state
    # still reports result True with no changes — confirm that is intended.
    if not ticket_res['retcode']:
        ret['comment'] = six.text_type(ticket)
        if output == 'grain':
            if grain and not key:
                # Store the ticket as the whole grain value.
                __salt__['grains.setval'](grain, ticket)
                ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
            elif grain:
                # Merge the ticket into the dict-valued grain under ``key``.
                if grain in __salt__['grains.ls']():
                    grain_value = __salt__['grains.get'](grain)
                else:
                    grain_value = {}
                grain_value[key] = ticket
                __salt__['grains.setval'](grain, grain_value)
                ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
        elif output:
            # Persist the ticket to the requested file.
            ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
            with salt.utils.files.fopen(output, 'w') as output_file:
                output_file.write(salt.utils.stringutils.to_str(ticket))
        else:
            ret['changes']['ticket'] = "Executed"
    return ret
|
Generate an icinga2 ticket on the master.
name
The domain name for which this ticket will be generated
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L40-L131
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n"
] |
# -*- coding: utf-8 -*-
'''
Icinga2 state
=============
.. versionadded:: 2017.7.0
:depends: - Icinga2 Python module
:configuration: See :py:mod:`salt.modules.icinga2` for setup instructions.
The icinga2 module is used to execute commands.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
command_id:
icinga2.generate_ticket:
- name: domain.tld
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os.path
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
    '''
    Only load this state module when the icinga2 execution module is
    available in __salt__.
    '''
    if 'icinga2.generate_ticket' in __salt__:
        return True
    return False
def generate_cert(name):
    '''
    Generate an icinga2 certificate and key on the client.

    name
        The domain name for which this certificate and key will be generated
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Call get_certs_path() once — it shells out to ``icinga2 --version``
    # every time, so the original double call ran the subprocess twice.
    certs_path = get_certs_path()
    cert = "{0}{1}.crt".format(certs_path, name)
    key = "{0}{1}.key".format(certs_path, name)
    # Checking if execution is needed.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Cert: {0} and key: {1} already generated.'.format(cert, key)
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate and key generation would be executed'
        return ret
    # Executing the command.
    cert_save = __salt__['icinga2.generate_cert'](name)
    if not cert_save['retcode']:
        ret['comment'] = "Certificate and key generated"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
        ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
    return ret
def save_cert(name, master):
    '''
    Save the certificate on master icinga2 node.

    name
        The domain name for which this certificate will be saved
    master
        Icinga2 master node for which this certificate will be saved
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    cert = "{0}trusted-master.crt".format(get_certs_path())
    # Already saved previously -> nothing to do.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate save for icinga2 master would be executed'
        return ret
    # Save the master certificate via the execution module.
    save_result = __salt__['icinga2.save_cert'](name, master)
    if not save_result['retcode']:
        ret['comment'] = "Certificate for icinga2 master saved"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
    return ret
def request_cert(name, master, ticket, port="5665"):
    '''
    Request CA certificate from master icinga2 node.

    name
        The domain name for which this certificate will be saved
    master
        Icinga2 master node for which this certificate will be saved
    ticket
        Authentication ticket generated on icinga2 master
    port
        Icinga2 port, defaults to 5665
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    cert = "{0}ca.crt".format(get_certs_path())
    # CA cert already present -> nothing to do.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate request from icinga2 master would be executed'
        return ret
    # Request the CA certificate from the master.
    response = __salt__['icinga2.request_cert'](name, master, ticket, port)
    if not response['retcode']:
        ret['comment'] = "Certificate request from icinga2 master executed"
        ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
        return ret
    ret['comment'] = "FAILED. Certificate requested failed with output: {0}".format(response['stdout'])
    ret['result'] = False
    return ret
def node_setup(name, master, ticket):
    '''
    Setup the icinga2 node.

    name
        The domain name for which this certificate will be saved
    master
        Icinga2 master node for which this certificate will be saved
    ticket
        Authentication ticket generated on icinga2 master
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Call get_certs_path() once — it shells out to ``icinga2 --version``.
    certs_path = get_certs_path()
    cert = "{0}{1}.crt.orig".format(certs_path, name)
    key = "{0}{1}.key.orig".format(certs_path, name)
    # Checking if execution is needed. Bug fix: the original tested the
    # cert file twice and never looked at the key file.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Node already configured.'
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Node setup will be executed.'
        return ret
    # Executing the command. Renamed local: the original shadowed this
    # function's own name with the result dict.
    setup_result = __salt__['icinga2.node_setup'](name, master, ticket)
    if not setup_result['retcode']:
        ret['comment'] = "Node setup executed."
        ret['changes']['cert'] = "Node setup finished successfully."
        return ret
    # Typo fix in the failure message: 'outpu' -> 'output'.
    ret['comment'] = "FAILED. Node setup failed with output: {0}".format(setup_result['stdout'])
    ret['result'] = False
    return ret
|
saltstack/salt
|
salt/states/icinga2.py
|
generate_cert
|
python
|
def generate_cert(name):
'''
Generate an icinga2 certificate and key on the client.
name
The domain name for which this certificate and key will be generated
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}{1}.crt".format(get_certs_path(), name)
key = "{0}{1}.key".format(get_certs_path(), name)
# Checking if execution is needed.
if os.path.isfile(cert) and os.path.isfile(key):
ret['comment'] = 'No execution needed. Cert: {0} and key: {1} already generated.'.format(cert, key)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate and key generation would be executed'
return ret
# Executing the command.
cert_save = __salt__['icinga2.generate_cert'](name)
if not cert_save['retcode']:
ret['comment'] = "Certificate and key generated"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
return ret
|
Generate an icinga2 certificate and key on the client.
name
The domain name for which this certificate and key will be generated
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L134-L163
|
[
"def get_certs_path():\n icinga2_output = __salt__['cmd.run_all']([salt.utils.path.which('icinga2'),\n \"--version\"], python_shell=False)\n version = re.search(r'r\\d+\\.\\d+', icinga2_output['stdout']).group(0)\n # Return new certs path for icinga2 >= 2.8\n if int(version.split('.')[1]) >= 8:\n return '/var/lib/icinga2/certs/'\n # Keep backwords compatibility with older icinga2\n return '/etc/icinga2/pki/'\n"
] |
# -*- coding: utf-8 -*-
'''
Icinga2 state
=============
.. versionadded:: 2017.7.0
:depends: - Icinga2 Python module
:configuration: See :py:mod:`salt.modules.icinga2` for setup instructions.
The icinga2 module is used to execute commands.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
command_id:
icinga2.generate_ticket:
- name: domain.tld
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os.path
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
'''
Only load if the icinga2 module is available in __salt__
'''
return 'icinga2.generate_ticket' in __salt__
def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
'''
Generate an icinga2 ticket on the master.
name
The domain name for which this ticket will be generated
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Checking if execution is needed.
if output == 'grain':
if grain and not key:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
return ret
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
return ret
elif output:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File {0} already set'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, not storing result'
return ret
# Executing the command.
ticket_res = __salt__['icinga2.generate_ticket'](name)
ticket = ticket_res['stdout']
if not ticket_res['retcode']:
ret['comment'] = six.text_type(ticket)
if output == 'grain':
if grain and not key:
__salt__['grains.setval'](grain, ticket)
ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = ticket
__salt__['grains.setval'](grain, grain_value)
ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
elif output:
ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
with salt.utils.files.fopen(output, 'w') as output_file:
output_file.write(salt.utils.stringutils.to_str(ticket))
else:
ret['changes']['ticket'] = "Executed"
return ret
def save_cert(name, master):
'''
Save the certificate on master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}trusted-master.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate save for icinga2 master would be executed'
return ret
# Executing the command.
cert_save = __salt__['icinga2.save_cert'](name, master)
if not cert_save['retcode']:
ret['comment'] = "Certificate for icinga2 master saved"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
return ret
def request_cert(name, master, ticket, port="5665"):
'''
Request CA certificate from master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
ticket
Authentication ticket generated on icinga2 master
port
Icinga2 port, defaults to 5665
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}ca.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate request from icinga2 master would be executed'
return ret
# Executing the command.
cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
if not cert_request['retcode']:
ret['comment'] = "Certificate request from icinga2 master executed"
ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
return ret
ret['comment'] = "FAILED. Certificate requested failed with output: {0}".format(cert_request['stdout'])
ret['result'] = False
return ret
def node_setup(name, master, ticket):
    '''
    Setup the icinga2 node.

    name
        The domain name for which this certificate will be saved
    master
        Icinga2 master node for which this certificate will be saved
    ticket
        Authentication ticket generated on icinga2 master
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Call get_certs_path() once — it shells out to ``icinga2 --version``.
    certs_path = get_certs_path()
    cert = "{0}{1}.crt.orig".format(certs_path, name)
    key = "{0}{1}.key.orig".format(certs_path, name)
    # Checking if execution is needed. Bug fix: the original tested the
    # cert file twice and never looked at the key file.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Node already configured.'
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Node setup will be executed.'
        return ret
    # Executing the command. Renamed local: the original shadowed this
    # function's own name with the result dict.
    setup_result = __salt__['icinga2.node_setup'](name, master, ticket)
    if not setup_result['retcode']:
        ret['comment'] = "Node setup executed."
        ret['changes']['cert'] = "Node setup finished successfully."
        return ret
    # Typo fix in the failure message: 'outpu' -> 'output'.
    ret['comment'] = "FAILED. Node setup failed with output: {0}".format(setup_result['stdout'])
    ret['result'] = False
    return ret
|
saltstack/salt
|
salt/states/icinga2.py
|
save_cert
|
python
|
def save_cert(name, master):
'''
Save the certificate on master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}trusted-master.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate save for icinga2 master would be executed'
return ret
# Executing the command.
cert_save = __salt__['icinga2.save_cert'](name, master)
if not cert_save['retcode']:
ret['comment'] = "Certificate for icinga2 master saved"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
return ret
|
Save the certificate on master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L166-L196
|
[
"def get_certs_path():\n icinga2_output = __salt__['cmd.run_all']([salt.utils.path.which('icinga2'),\n \"--version\"], python_shell=False)\n version = re.search(r'r\\d+\\.\\d+', icinga2_output['stdout']).group(0)\n # Return new certs path for icinga2 >= 2.8\n if int(version.split('.')[1]) >= 8:\n return '/var/lib/icinga2/certs/'\n # Keep backwords compatibility with older icinga2\n return '/etc/icinga2/pki/'\n"
] |
# -*- coding: utf-8 -*-
'''
Icinga2 state
=============
.. versionadded:: 2017.7.0
:depends: - Icinga2 Python module
:configuration: See :py:mod:`salt.modules.icinga2` for setup instructions.
The icinga2 module is used to execute commands.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
command_id:
icinga2.generate_ticket:
- name: domain.tld
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os.path
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
'''
Only load if the icinga2 module is available in __salt__
'''
return 'icinga2.generate_ticket' in __salt__
def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
'''
Generate an icinga2 ticket on the master.
name
The domain name for which this ticket will be generated
output
grain: output in a grain
other: the file to store results
None: output to the result comment (default)
grain:
grain to store the output (need output=grain)
key:
the specified grain will be treated as a dictionary, the result
of this state will be stored under the specified key.
overwrite:
The file or grain will be overwritten if it already exists (default)
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Checking if execution is needed.
if output == 'grain':
if grain and not key:
if not overwrite and grain in __salt__['grains.ls']():
ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
return ret
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
if not overwrite and key in grain_value:
ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
return ret
else:
ret['result'] = False
ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
return ret
elif output:
if not overwrite and os.path.isfile(output):
ret['comment'] = 'No execution needed. File {0} already set'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
return ret
elif __opts__['test']:
ret['result'] = None
ret['comment'] = 'Ticket generation would be executed, not storing result'
return ret
# Executing the command.
ticket_res = __salt__['icinga2.generate_ticket'](name)
ticket = ticket_res['stdout']
if not ticket_res['retcode']:
ret['comment'] = six.text_type(ticket)
if output == 'grain':
if grain and not key:
__salt__['grains.setval'](grain, ticket)
ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
elif grain:
if grain in __salt__['grains.ls']():
grain_value = __salt__['grains.get'](grain)
else:
grain_value = {}
grain_value[key] = ticket
__salt__['grains.setval'](grain, grain_value)
ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
elif output:
ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
with salt.utils.files.fopen(output, 'w') as output_file:
output_file.write(salt.utils.stringutils.to_str(ticket))
else:
ret['changes']['ticket'] = "Executed"
return ret
def generate_cert(name):
'''
Generate an icinga2 certificate and key on the client.
name
The domain name for which this certificate and key will be generated
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}{1}.crt".format(get_certs_path(), name)
key = "{0}{1}.key".format(get_certs_path(), name)
# Checking if execution is needed.
if os.path.isfile(cert) and os.path.isfile(key):
ret['comment'] = 'No execution needed. Cert: {0} and key: {1} already generated.'.format(cert, key)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate and key generation would be executed'
return ret
# Executing the command.
cert_save = __salt__['icinga2.generate_cert'](name)
if not cert_save['retcode']:
ret['comment'] = "Certificate and key generated"
ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
return ret
def request_cert(name, master, ticket, port="5665"):
'''
Request CA certificate from master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
ticket
Authentication ticket generated on icinga2 master
port
Icinga2 port, defaults to 5665
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}ca.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate request from icinga2 master would be executed'
return ret
# Executing the command.
cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
if not cert_request['retcode']:
ret['comment'] = "Certificate request from icinga2 master executed"
ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
return ret
ret['comment'] = "FAILED. Certificate requested failed with output: {0}".format(cert_request['stdout'])
ret['result'] = False
return ret
def node_setup(name, master, ticket):
    '''
    Setup the icinga2 node.

    name
        The domain name for which this certificate will be saved
    master
        Icinga2 master node for which this certificate will be saved
    ticket
        Authentication ticket generated on icinga2 master
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # Call get_certs_path() once — it shells out to ``icinga2 --version``.
    certs_path = get_certs_path()
    cert = "{0}{1}.crt.orig".format(certs_path, name)
    key = "{0}{1}.key.orig".format(certs_path, name)
    # Checking if execution is needed. Bug fix: the original tested the
    # cert file twice and never looked at the key file.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Node already configured.'
        return ret
    # Honour test (dry-run) mode.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Node setup will be executed.'
        return ret
    # Executing the command. Renamed local: the original shadowed this
    # function's own name with the result dict.
    setup_result = __salt__['icinga2.node_setup'](name, master, ticket)
    if not setup_result['retcode']:
        ret['comment'] = "Node setup executed."
        ret['changes']['cert'] = "Node setup finished successfully."
        return ret
    # Typo fix in the failure message: 'outpu' -> 'output'.
    ret['comment'] = "FAILED. Node setup failed with output: {0}".format(setup_result['stdout'])
    ret['result'] = False
    return ret
|
saltstack/salt
|
salt/states/icinga2.py
|
request_cert
|
python
|
def request_cert(name, master, ticket, port="5665"):
'''
Request CA certificate from master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
ticket
Authentication ticket generated on icinga2 master
port
Icinga2 port, defaults to 5665
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
cert = "{0}ca.crt".format(get_certs_path())
# Checking if execution is needed.
if os.path.isfile(cert):
ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Certificate request from icinga2 master would be executed'
return ret
# Executing the command.
cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
if not cert_request['retcode']:
ret['comment'] = "Certificate request from icinga2 master executed"
ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
return ret
ret['comment'] = "FAILED. Certificate requested failed with output: {0}".format(cert_request['stdout'])
ret['result'] = False
return ret
|
Request CA certificate from master icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
ticket
Authentication ticket generated on icinga2 master
port
Icinga2 port, defaults to 5665
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L199-L239
|
[
"def get_certs_path():\n icinga2_output = __salt__['cmd.run_all']([salt.utils.path.which('icinga2'),\n \"--version\"], python_shell=False)\n version = re.search(r'r\\d+\\.\\d+', icinga2_output['stdout']).group(0)\n # Return new certs path for icinga2 >= 2.8\n if int(version.split('.')[1]) >= 8:\n return '/var/lib/icinga2/certs/'\n # Keep backwords compatibility with older icinga2\n return '/etc/icinga2/pki/'\n"
] |
# -*- coding: utf-8 -*-
'''
Icinga2 state
=============
.. versionadded:: 2017.7.0
:depends: - Icinga2 Python module
:configuration: See :py:mod:`salt.modules.icinga2` for setup instructions.
The icinga2 module is used to execute commands.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
command_id:
icinga2.generate_ticket:
- name: domain.tld
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os.path
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
'''
Only load if the icinga2 module is available in __salt__
'''
return 'icinga2.generate_ticket' in __salt__
def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
    '''
    Generate an icinga2 ticket on the master.

    name
        The domain name for which this ticket will be generated

    output
        grain: output in a grain
        other: the file to store results
        None: output to the result comment (default)

    grain:
        grain to store the output (need output=grain)

    key:
        the specified grain will be treated as a dictionary, the result
        of this state will be stored under the specified key.

    overwrite:
        The file or grain will be overwritten if it already exists (default)
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Checking if execution is needed.
    if output == 'grain':
        if grain and not key:
            # Whole-grain storage: skip when the grain exists and overwrite
            # is disabled.
            if not overwrite and grain in __salt__['grains.ls']():
                ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
                return ret
        elif grain:
            # Dict-style grain storage: the grain holds a dictionary and the
            # result is stored under ``key``.
            if grain in __salt__['grains.ls']():
                grain_value = __salt__['grains.get'](grain)
            else:
                grain_value = {}
            if not overwrite and key in grain_value:
                ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
                return ret
        else:
            # output='grain' is meaningless without a grain name.
            ret['result'] = False
            ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
            return ret
    elif output:
        # File storage: skip when the file exists and overwrite is disabled.
        if not overwrite and os.path.isfile(output):
            ret['comment'] = 'No execution needed. File {0} already set'.format(output)
            return ret
        elif __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
            return ret
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Ticket generation would be executed, not storing result'
        return ret

    # Executing the command.
    ticket_res = __salt__['icinga2.generate_ticket'](name)
    ticket = ticket_res['stdout']
    if not ticket_res['retcode']:
        ret['comment'] = six.text_type(ticket)

    # Persist the ticket according to the requested output target.
    # NOTE(review): storage also runs when the command failed (retcode != 0),
    # so whatever stdout contained gets written -- confirm this is intended.
    if output == 'grain':
        if grain and not key:
            __salt__['grains.setval'](grain, ticket)
            ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
        elif grain:
            if grain in __salt__['grains.ls']():
                grain_value = __salt__['grains.get'](grain)
            else:
                grain_value = {}
            grain_value[key] = ticket
            __salt__['grains.setval'](grain, grain_value)
            ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
    elif output:
        ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
        with salt.utils.files.fopen(output, 'w') as output_file:
            output_file.write(salt.utils.stringutils.to_str(ticket))
    else:
        ret['changes']['ticket'] = "Executed"

    return ret
def generate_cert(name):
    '''
    Generate an icinga2 certificate and key on the client.

    name
        The domain name for which this certificate and key will be generated
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    cert = "{0}{1}.crt".format(get_certs_path(), name)
    key = "{0}{1}.key".format(get_certs_path(), name)

    # Nothing to do when both artifacts are already on disk.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Cert: {0} and key: {1} already generated.'.format(cert, key)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate and key generation would be executed'
        return ret

    # Delegate the actual generation to the icinga2 execution module.
    generation = __salt__['icinga2.generate_cert'](name)
    if not generation['retcode']:
        ret['comment'] = "Certificate and key generated"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
        ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
    return ret
def save_cert(name, master):
    '''
    Save the certificate on master icinga2 node.

    name
        The domain name for which this certificate will be saved

    master
        Icinga2 master node for which this certificate will be saved
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    cert = "{0}trusted-master.crt".format(get_certs_path())

    # Already fetched previously -- nothing to do.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate save for icinga2 master would be executed'
        return ret

    # Fetch and store the master certificate via the execution module.
    save_result = __salt__['icinga2.save_cert'](name, master)
    if not save_result['retcode']:
        ret['comment'] = "Certificate for icinga2 master saved"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
    return ret
def node_setup(name, master, ticket):
    '''
    Setup the icinga2 node.

    name
        The domain name the node certificates were generated for

    master
        Icinga2 master node this node is registered against

    ticket
        Authentication ticket generated on icinga2 master
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # icinga2 backs up the pre-setup certificate and key as *.orig; their
    # presence indicates node setup has already been performed.
    cert = "{0}{1}.crt.orig".format(get_certs_path(), name)
    key = "{0}{1}.key.orig".format(get_certs_path(), name)

    # Checking if execution is needed.
    # BUGFIX: the original tested ``cert`` twice; the second check must be
    # against ``key``, otherwise a missing key backup went undetected.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Node already configured.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Node setup will be executed.'
        return ret

    # Executing the command.
    node_setup = __salt__['icinga2.node_setup'](name, master, ticket)
    if not node_setup['retcode']:
        ret['comment'] = "Node setup executed."
        ret['changes']['cert'] = "Node setup finished successfully."
        return ret

    # BUGFIX: corrected the "outpu" typo in the user-facing failure message.
    ret['comment'] = "FAILED. Node setup failed with output: {0}".format(node_setup['stdout'])
    ret['result'] = False
    return ret
|
saltstack/salt
|
salt/states/icinga2.py
|
node_setup
|
python
|
def node_setup(name, master, ticket):
    '''
    Setup the icinga2 node.

    name
        The domain name the node certificates were generated for

    master
        Icinga2 master node this node is registered against

    ticket
        Authentication ticket generated on icinga2 master
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    # icinga2 backs up the pre-setup certificate and key as *.orig; their
    # presence indicates node setup has already been performed.
    cert = "{0}{1}.crt.orig".format(get_certs_path(), name)
    key = "{0}{1}.key.orig".format(get_certs_path(), name)

    # Checking if execution is needed.
    # BUGFIX: the original tested ``cert`` twice; the second check must be
    # against ``key``, otherwise a missing key backup went undetected.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Node already configured.'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Node setup will be executed.'
        return ret

    # Executing the command.
    node_setup = __salt__['icinga2.node_setup'](name, master, ticket)
    if not node_setup['retcode']:
        ret['comment'] = "Node setup executed."
        ret['changes']['cert'] = "Node setup finished successfully."
        return ret

    # BUGFIX: corrected the "outpu" typo in the user-facing failure message.
    ret['comment'] = "FAILED. Node setup failed with output: {0}".format(node_setup['stdout'])
    ret['result'] = False
    return ret
|
Setup the icinga2 node.
name
The domain name for which this certificate will be saved
master
Icinga2 master node for which this certificate will be saved
ticket
Authentication ticket generated on icinga2 master
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/icinga2.py#L242-L280
|
[
"def get_certs_path():\n icinga2_output = __salt__['cmd.run_all']([salt.utils.path.which('icinga2'),\n \"--version\"], python_shell=False)\n version = re.search(r'r\\d+\\.\\d+', icinga2_output['stdout']).group(0)\n # Return new certs path for icinga2 >= 2.8\n if int(version.split('.')[1]) >= 8:\n return '/var/lib/icinga2/certs/'\n # Keep backwords compatibility with older icinga2\n return '/etc/icinga2/pki/'\n"
] |
# -*- coding: utf-8 -*-
'''
Icinga2 state
=============
.. versionadded:: 2017.7.0
:depends: - Icinga2 Python module
:configuration: See :py:mod:`salt.modules.icinga2` for setup instructions.
The icinga2 module is used to execute commands.
Its output may be stored in a file or in a grain.
.. code-block:: yaml
command_id:
icinga2.generate_ticket:
- name: domain.tld
- output: "/tmp/query_id.txt"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os.path
# Import Salt libs
from salt.ext import six
import salt.utils.files
import salt.utils.stringutils
from salt.utils.icinga2 import get_certs_path
def __virtual__():
    '''
    Load this state module only when the icinga2 execution module is present.
    '''
    required_function = 'icinga2.generate_ticket'
    return required_function in __salt__
def generate_ticket(name, output=None, grain=None, key=None, overwrite=True):
    '''
    Generate an icinga2 ticket on the master.

    name
        The domain name for which this ticket will be generated

    output
        grain: output in a grain
        other: the file to store results
        None: output to the result comment (default)

    grain:
        grain to store the output (need output=grain)

    key:
        the specified grain will be treated as a dictionary, the result
        of this state will be stored under the specified key.

    overwrite:
        The file or grain will be overwritten if it already exists (default)
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    # Checking if execution is needed.
    if output == 'grain':
        if grain and not key:
            # Whole-grain storage: skip when the grain exists and overwrite
            # is disabled.
            if not overwrite and grain in __salt__['grains.ls']():
                ret['comment'] = 'No execution needed. Grain {0} already set'.format(grain)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}'.format(grain)
                return ret
        elif grain:
            # Dict-style grain storage: the grain holds a dictionary and the
            # result is stored under ``key``.
            if grain in __salt__['grains.ls']():
                grain_value = __salt__['grains.get'](grain)
            else:
                grain_value = {}
            if not overwrite and key in grain_value:
                ret['comment'] = 'No execution needed. Grain {0}:{1} already set'.format(grain, key)
                return ret
            elif __opts__['test']:
                ret['result'] = None
                ret['comment'] = 'Ticket generation would be executed, storing result in grain: {0}:{1}'.format(grain, key)
                return ret
        else:
            # output='grain' is meaningless without a grain name.
            ret['result'] = False
            ret['comment'] = "Error: output type 'grain' needs the grain parameter\n"
            return ret
    elif output:
        # File storage: skip when the file exists and overwrite is disabled.
        if not overwrite and os.path.isfile(output):
            ret['comment'] = 'No execution needed. File {0} already set'.format(output)
            return ret
        elif __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Ticket generation would be executed, storing result in file: {0}'.format(output)
            return ret
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Ticket generation would be executed, not storing result'
        return ret

    # Executing the command.
    ticket_res = __salt__['icinga2.generate_ticket'](name)
    ticket = ticket_res['stdout']
    if not ticket_res['retcode']:
        ret['comment'] = six.text_type(ticket)

    # Persist the ticket according to the requested output target.
    # NOTE(review): storage also runs when the command failed (retcode != 0),
    # so whatever stdout contained gets written -- confirm this is intended.
    if output == 'grain':
        if grain and not key:
            __salt__['grains.setval'](grain, ticket)
            ret['changes']['ticket'] = "Executed. Output into grain: {0}".format(grain)
        elif grain:
            if grain in __salt__['grains.ls']():
                grain_value = __salt__['grains.get'](grain)
            else:
                grain_value = {}
            grain_value[key] = ticket
            __salt__['grains.setval'](grain, grain_value)
            ret['changes']['ticket'] = "Executed. Output into grain: {0}:{1}".format(grain, key)
    elif output:
        ret['changes']['ticket'] = "Executed. Output into {0}".format(output)
        with salt.utils.files.fopen(output, 'w') as output_file:
            output_file.write(salt.utils.stringutils.to_str(ticket))
    else:
        ret['changes']['ticket'] = "Executed"

    return ret
def generate_cert(name):
    '''
    Generate an icinga2 certificate and key on the client.

    name
        The domain name for which this certificate and key will be generated
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    cert = "{0}{1}.crt".format(get_certs_path(), name)
    key = "{0}{1}.key".format(get_certs_path(), name)

    # Nothing to do when both artifacts are already on disk.
    if os.path.isfile(cert) and os.path.isfile(key):
        ret['comment'] = 'No execution needed. Cert: {0} and key: {1} already generated.'.format(cert, key)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate and key generation would be executed'
        return ret

    # Delegate the actual generation to the icinga2 execution module.
    generation = __salt__['icinga2.generate_cert'](name)
    if not generation['retcode']:
        ret['comment'] = "Certificate and key generated"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
        ret['changes']['key'] = "Executed. Key saved: {0}".format(key)
    return ret
def save_cert(name, master):
    '''
    Save the certificate on master icinga2 node.

    name
        The domain name for which this certificate will be saved

    master
        Icinga2 master node for which this certificate will be saved
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    cert = "{0}trusted-master.crt".format(get_certs_path())

    # Already fetched previously -- nothing to do.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already saved.'.format(cert)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate save for icinga2 master would be executed'
        return ret

    # Fetch and store the master certificate via the execution module.
    save_result = __salt__['icinga2.save_cert'](name, master)
    if not save_result['retcode']:
        ret['comment'] = "Certificate for icinga2 master saved"
        ret['changes']['cert'] = "Executed. Certificate saved: {0}".format(cert)
    return ret
def request_cert(name, master, ticket, port="5665"):
    '''
    Request CA certificate from master icinga2 node.

    name
        The domain name for which this certificate will be saved

    master
        Icinga2 master node the CA certificate is requested from

    ticket
        Authentication ticket generated on icinga2 master

    port
        Icinga2 port, defaults to 5665
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    cert = "{0}ca.crt".format(get_certs_path())

    # Checking if execution is needed.
    if os.path.isfile(cert):
        ret['comment'] = 'No execution needed. Cert: {0} already exists.'.format(cert)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Certificate request from icinga2 master would be executed'
        return ret

    # Executing the command.
    cert_request = __salt__['icinga2.request_cert'](name, master, ticket, port)
    if not cert_request['retcode']:
        ret['comment'] = "Certificate request from icinga2 master executed"
        ret['changes']['cert'] = "Executed. Certificate requested: {0}".format(cert)
        return ret

    # BUGFIX: corrected the broken grammar of the failure message
    # ("Certificate requested failed" -> "Certificate request failed").
    ret['comment'] = "FAILED. Certificate request failed with output: {0}".format(cert_request['stdout'])
    ret['result'] = False
    return ret
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
avail_images
|
python
|
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    # Key each distribution record by its human-readable label.
    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
|
Return available Linode images.
CLI Example:
.. code-block:: bash
salt-cloud --list-images my-linode-config
salt-cloud -f avail_images my-linode-config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L110-L133
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric code returned by the Linode API
# plus a human-readable description.
# NOTE(review): 'beeing_created' is misspelled, but the key may be referenced
# elsewhere in the driver, so renaming it here would be unsafe.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# Internal driver name used by salt-cloud to address this module.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    configured = get_configured_provider()
    return False if configured is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # A provider only counts as configured when both credentials are present.
    provider_name = __active_provider_name__ or __virtualname__
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    # Key each datacenter record by its location string.
    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # BUGFIX: the message previously named avail_locations
            # (copy/paste error).
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # When invoked with -f, the target name arrives in kwargs, not as the
    # positional argument.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # Resolve the Linode ID from the name if it was not given directly;
    # linode_item is only used for human-readable messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    # NOTE(review): STATUS '1' corresponds to LINODE_STATUS['running'].
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    # Block until the boot job completes; returns False on failure.
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three IDs are mandatory; abort when any is missing.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dict assembled by salt-cloud from provider/profile
        configuration. Must contain at least ``name`` plus either ``image``
        or ``clonefrom``.

    Returns the bootstrap result dict (augmented with node data) on success,
    or ``False`` when validation or the Linode API call fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) and datacenter (location) IDs up front.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    # Two creation paths: clone an existing Linode, or deploy a fresh one.
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        # Inherit size/location from the clone source when not given.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # Abort on the first API-reported error.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # NOTE(review): a failed wait only logs; creation continues regardless.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    # name, linode_id, root_disk_id and swap_disk_id are all mandatory.
    if None in (name, linode_id, root_disk_id, swap_disk_id):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The optional data disk is simply appended to the disk list.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    args = {}
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        args['rootSSHKey'] = pub_key
    if root_password:
        args['rootPass'] = root_password
    else:
        # Linode refuses to create a disk without a root password.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    args.update({'LinodeID': linode_id,
                 'DistributionID': get_distribution_id(vm_),
                 'Label': vm_['name'],
                 'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the Linode (type is hardcoded to ext4 at the moment).
    .. versionadded:: 2016.3.0
    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    api_args = {
        'LinodeID': linode_id,
        # Suffix distinguishes the data disk from the root disk label.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=api_args))
def create_private_ip(linode_id):
    r'''
    Create a private IP for the specified Linode.
    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroy a Linode by name.
    name
        The name of the VM to be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    def _fire(label, tag):
        # Emit a salt-cloud lifecycle event for this instance.
        __utils__['cloud.fire_event'](
            'event',
            label,
            'salt/cloud/{0}/{1}'.format(name, tag),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
    _fire('destroying instance', 'destroying')
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'delete',
                      args={'LinodeID': node_id, 'skipChecks': True})
    _fire('destroyed instance', 'destroyed')
    if __opts__.get('update_cachedir', False) is True:
        provider_alias = __active_provider_name__.split(':')[0]
        __utils__['cloud.delete_minion_cachedir'](name, provider_alias, __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    # Resolve the Linode ID from the name if only a name was given.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first config is ever reported.
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a location.
    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # Raises KeyError when the location name is unknown; callers rely on that.
    datacenter = avail_locations()[location]
    return datacenter['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.
    vm\_
        The VM to get the disk size for.
    '''
    # Default to all remaining space on the Linode after swap is carved out.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.
    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is left after the root and swap disks are allocated.
    return total_hd - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for a VM's configured image.
    vm\_
        The VM to get the distribution ID for.
    Raises SaltCloudNotFound when the image label does not match any
    available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # BUGFIX: pprint.pprint() prints to stdout and returns None, which
    # made the exception message read "available:\nNone". pformat()
    # returns the formatted string for embedding in the message.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses.
    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')
    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if entry['ISPUBLIC'] == 1:
            bucket['public_ips'].append(entry['IPADDRESS'])
        else:
            bucket['private_ips'].append(entry['IPADDRESS'])
    # When a single Linode was requested, flatten the result to just its
    # ip lists rather than keying by Linode ID.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flat['private_ips'] = match['private_ips']
            flat['public_ips'] = match['public_ips']
        return flat
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.
    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    # A name costs one extra API call to resolve to an ID.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    listing = _query('linode', 'list', args={'LinodeID': linode_id})
    return listing['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for a VM from the provided name.
    name
        The name (label) of the Linode from which to get the Linode ID. Required.
    Raises SaltCloudNotFound when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.
    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is accepted as a legacy alias for 'password'.
    legacy_password = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_password,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used by the Linode API output, returning the matching PLANID.
    label
        The label, or name, of the plan to decode.
    Example:
        `Linode 2048` will decode to `Linode 2GB`
    Raises SaltCloudException when the label cannot be matched to an
    available plan.
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<type> <n>GB" format, but unknown.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            # BUGFIX: int() on a non-numeric string raises ValueError, not
            # TypeError, so the fallback branch was unreachable.
            except ValueError:
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            # The 1GB tier was rebranded "Nanode".
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # BUGFIX: use floor division; true division yields a float on
            # Python 3, producing labels like "Linode 2.0GB" that never
            # match the API's "Linode 2GB".
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.
    label
        The label, or name, of the plan to get the ID from.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUGFIX: the message previously referred to "show_instance",
        # a copy-paste error from another function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # _decode_linode_plan_label handles both old and new label formats
    # and returns the PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private IP address is requested for this profile.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested for this profile.
    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key, if any.
    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB.
    vm\_
        The VM profile to obtain the swap size from.
    '''
    # 128 MB is the driver default when the profile does not set 'swap'.
    return config.get_cloud_config_value('swap', vm_, __opts__, default=128)
def get_vm_size(vm_):
    r'''
    Return the VM's size (RAM, in MB).
    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']
    # Cross-check the RAM reported by the API against the numeric part of
    # the legacy "Linode <MB>" label.
    numeric_part = size_label
    if numeric_part.startswith('Linode'):
        numeric_part = numeric_part.replace('Linode ', '')
    if ram != int(numeric_part):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(numeric_part)
        )
    return ram
def list_nodes(call=None):
    '''
    Return a brief listing of all Linodes.
    CLI Example:
    .. code-block:: bash
        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing only; see list_nodes_full for the complete data.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List Linodes with all available information.
    CLI Example:
    .. code-block:: bash
        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing includes the raw API record under 'extra'.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        # Only the ID and a human-readable state are kept.
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return the VMs on the provider, limited to the configured select fields.
    '''
    # Delegate field filtering to the shared cloud utility.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.
    .. versionadded:: 2015.8.0
    name
        The name of the VM to reboot.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUGFIX: the message previously referred to "show_instance",
        # a copy-paste error from another function.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.
    .. versionadded:: 2015.8.0
    name
        The name of the VM for which to display details.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a show_instance vm_name
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUGFIX: the message previously referred to "show_instance",
        # a copy-paste error from another function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive daily/weekly/yearly figures from the API's hourly and
    # monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.
    name
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    boot_data = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    if not _wait_for_job(node_id, boot_data['JobID']):
        return {'action': 'start',
                'success': False}
    return {'state': 'Running',
            'action': 'start',
            'success': True}
def stop(name, call=None):
    '''
    Stop a VM in Linode.
    name
        The name of the VM to stop.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 == Powered Off; nothing to do.
    if node['STATUS'] == 2:
        # CONSISTENCY FIX: include the 'action' key, matching the return
        # shape of start()'s already-running branch.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Update a Linode's properties.
    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # ROBUSTNESS: the original crashed with AttributeError when
    # update_args was omitted (None.update(...)).
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper to format and parse Linode listing data.
    full
        When True, attach the raw API record under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()
    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach ip lists only when this node has any known addresses.
        ip_entry = all_ips.get(linode_id)
        if ip_entry is not None:
            entry['private_ips'] = ip_entry['private_ips']
            entry['public_ips'] = ip_entry['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API action group (e.g. 'linode', 'avail'); combined with
        ``command`` into the ``api_action`` query parameter.
    command
        API command within the action group (e.g. 'list', 'boot').
    args
        Dict of extra query parameters; replaced with an empty dict if
        not a dict.
    method
        HTTP method; non-POST requests set an 'Accept: application/json'
        header, and DELETE responses are not JSON-decoded.
    header_dict
        Optional extra HTTP headers.
    data
        Optional request body.
    url
        Base API endpoint.

    Raises SaltCloudSystemExit on authentication failure and
    SaltCloudException on any other API-reported error.
    '''
    # LASTCALL is module-level state used for simple client-side
    # rate limiting across calls.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    # Inject credentials and the api_action only if the caller did not
    # already supply them.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # If a call was already made this same second, sleep to respect the
    # configured rate limit (0 by default, i.e. no delay).
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials are fatal: abort immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    # Record the timestamp of this (successful) call for rate limiting.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll until a Linode job reports success, or the timeout elapses.
    linode_id
        The ID of the Linode to wait on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        The maximum time, in seconds, to wait.
    quiet
        Log status updates at debug level when True, otherwise at info.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode', 'job.list', args={'LinodeID': linode_id})['DATA']
        # The most recent job is first in the list.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll until a Linode reaches the given status, or the timeout elapses.
    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status code to wait for (defaults to 'brand_new').
    timeout
        The maximum time, in seconds, to wait.
    quiet
        Log status updates at debug level when True, otherwise at info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    waiting_for = _get_status_descr_by_id(status)
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        current = get_linode(kwargs={'linode_id': linode_id})
        if current['STATUS'] == status:
            return True
        current_descr = _get_status_descr_by_id(current['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a status code.
    status_id
        Linode VM status code (see LINODE_STATUS).
    '''
    target = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == target:
            return status_data['descr']
    # Fall back to a direct key lookup (returns None for unknown codes).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.
    status_name
        Internal Linode VM status name (see LINODE_STATUS).
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.
    .. versionadded:: 2015.5.6
    name
        The VM name to validate.
    '''
    name = six.text_type(name)
    # Must begin and end with an alphanumeric; dashes/underscores allowed
    # in between; 3-48 characters total.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(pattern, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to: either 'public_ips'
    (the default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
avail_locations
|
python
|
def avail_locations(call=None):
'''
Return available Linode datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations my-linode-config
salt-cloud -f avail_locations my-linode-config
'''
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
response = _query('avail', 'datacenters')
ret = {}
for item in response['DATA']:
name = item['LOCATION']
ret[name] = item
return ret
|
Return available Linode datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations my-linode-config
salt-cloud -f avail_locations my-linode-config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L136-L159
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric 'code' the API returns and a
# human-readable 'descr' used in listings. Looked up by
# _get_status_id_by_name() and _get_status_descr_by_id().
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
# Name under which this driver is exposed to salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.

    Returns the virtual module name when a provider with the required
    credentials is configured, otherwise False (module not loaded).
    '''
    if get_configured_provider() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    # Both 'apikey' and 'password' must be present for the provider to
    # be considered configured.
    return config.is_provider_configured(
        __opts__, provider_name, ('apikey', 'password')
    )
def avail_images(call=None):
    '''
    Return available Linode images.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    # Key each distribution record by its human-readable label.
    distributions = _query('avail', 'distributions')['DATA']
    return dict((item['LABEL'], item) for item in distributions)
def avail_sizes(call=None):
    '''
    Return available Linode sizes.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUGFIX: the message previously referred to "avail_locations",
        # a copy-paste error from another function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.
    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.
    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.
    config_id
        The ID of the Config to boot. Required.
    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.
    Can be called as an action (which requires a name):
    .. code-block:: bash
        salt-cloud -a boot my-instance config_id=10
    ...or as a function (which requires either a name or linode_id):
    .. code-block:: bash
        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    # Function invocation takes its arguments from kwargs and needs
    # stricter validation than the action form.
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )
    # linode_item is used only for log/error messages: the name when one
    # was supplied, otherwise the numeric ID.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the boot job completes (or times out).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.
    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The VM profile dictionary describing the machine to create.
        Supports either a fresh deployment (``image``/``size``/``location``)
        or cloning an existing Linode via ``clonefrom``.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the requested label violates Linode's naming rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # when they were not set explicitly in the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # API-level errors come back inside the payload rather than as exceptions.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the Linode to report 'Brand New' before configuring it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # Cloned machines already carry a configuration profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

    .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # DiskList is a comma-separated list; the optional data disk goes last.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the chosen distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; used to size the root disk.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    kwargs = {}

    pub_key = get_pub_key(vm_)
    if pub_key:
        kwargs['rootSSHKey'] = pub_key

    root_password = get_password(vm_)
    if not root_password:
        # Linode refuses deployments without a root password.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    kwargs['rootPass'] = root_password

    kwargs['LinodeID'] = linode_id
    kwargs['DistributionID'] = get_distribution_id(vm_)
    kwargs['Label'] = vm_['name']
    kwargs['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    args = {
        'LinodeID': linode_id,
        # Label the data disk after the VM so it is easy to identify.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Resolve the Linode ID from its label, then delete the machine.
    # skipChecks=True removes the Linode even if it still has disks attached.
    linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion's cached data so subsequent queries don't show it.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_config_id function requires either a \'name\' or a \'linode_id\' '
                'to be provided.'
            )
        linode_id = get_linode_id_from_name(name)

    # The first configuration profile attached to the Linode is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    return {'config_id': response[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
        Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Swap size in MB, subtracted from the plan's total disk by default.

    linode_id
        The Linode whose total disk capacity is consulted.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to everything that is not reserved for swap.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # The data disk gets whatever is left after the root and swap disks.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's ``image`` does not match any
    available Linode distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match found. Note: pprint.pformat must be used here -- the original
    # code called pprint.pprint, which prints to stdout and returns None, so
    # the exception message displayed 'None' instead of the available images.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, the return value is a single ``{'public_ips': [...],
        'private_ips': [...]}`` dict instead of a mapping keyed by Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        key = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[key].append(entry['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flattened['private_ips'] = node_ips['private_ips']
            flattened['public_ips'] = node_ips['public_ips']
        ret = flattened

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from. The ``password`` key
        takes precedence; ``passwd`` is honored as a fallback.
    '''
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label, raising
    SaltCloudException when the label cannot be matched.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new format but unknown -- nothing to translate.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError (not TypeError) for non-numeric
                # strings; the original only caught TypeError, so labels like
                # 'Linode abc' crashed instead of reaching this fallback.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Old-style labels encode the size in MB; new-style uses GB.
            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # The original message wrongly referred to 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy MB-style labels (e.g. 'Linode 2048') to the current
    # GB-style form before looking up the plan ID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # 'assign_private_ip' defaults to False when not present in the profile.
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__,
        default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    # 'allocate_data_disk' defaults to False when not present in the profile.
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__,
        default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    # Looked up only in the profile/provider config, not globally.
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__,
        search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from. Defaults to 128 MB
        when the profile does not set ``swap``.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for. The profile's ``size`` must match a
        Linode plan label; the plan's RAM value is returned.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Strip the 'Linode ' prefix so the numeric part can be compared to RAM.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram != int(vm_size):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
    return ram
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared helper, requesting only the brief fields.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared helper, including the raw API payload per node.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.

    The fields to include are taken from the ``query.selection`` option;
    filtering is handled by the shared salt-cloud utility.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # The original message wrongly referred to 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until Linode reports the reboot job finished (or timed out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # The original message wrongly referred to 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the hourly and monthly API prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 1 == 'Running' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 2 == 'Powered Off' (see LINODE_STATUS); nothing to do.
    # The 'action' key is included for consistency with start()'s
    # already-running return value.
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
        Optional; defaults to an empty update.
    '''
    # Guard against the documented default: calling with update_args=None
    # previously raised AttributeError on None.update().
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    full
        When True, the raw API payload for each node is attached under
        the 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        node_ips = all_ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API group (e.g. 'linode', 'avail'); combined with ``command``
        into the ``api_action`` query parameter as '<action>.<command>'.

    command
        The API method within the group (e.g. 'list', 'boot').

    args
        Extra query parameters; the API key and api_action are injected here.

    method
        HTTP method; DELETE responses are not JSON-decoded.

    Raises SaltCloudSystemExit on authentication failure and
    SaltCloudException for any other API-reported error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Simple client-side rate limiting: if a call already happened in this
    # same second, sleep for the configured interval before issuing another.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # Linode reports errors inside the JSON payload rather than via HTTP
    # status codes; surface them as Salt exceptions.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    # Record the epoch of this call for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The maximum number of seconds to poll before giving up.

    quiet
        Log status updates at debug level when True; info level otherwise.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # The most recent job is first in the list.
        latest = jobs[0]
        if latest['JOBID'] == job_id and latest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to 'brand_new'.

    timeout
        The maximum number of seconds to poll before giving up.

    quiet
        Log status updates at debug level when True; info level otherwise.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    waiting_for = _get_status_descr_by_id(status)

    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True

        current = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current, waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    target = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == target:
            return status_data['descr']
    # Fall back to a direct key lookup (yields None for unknown codes).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)

    # Labels must be 3-48 characters of ASCII alphanumerics, dashes and
    # underscores, starting and ending with an alphanumeric character.
    valid = (
        3 <= len(name) <= 48
        and re.match(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$', name) is not None
    )

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Profile-scoped setting only; not searched globally.
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
avail_sizes
|
python
|
def avail_sizes(call=None):
'''
Return available Linode sizes.
CLI Example:
.. code-block:: bash
salt-cloud --list-sizes my-linode-config
salt-cloud -f avail_sizes my-linode-config
'''
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
response = _query('avail', 'LinodePlans')
ret = {}
for item in response['DATA']:
name = item['LABEL']
ret[name] = item
return ret
|
Return available Linode sizes.
CLI Example:
.. code-block:: bash
salt-cloud --list-sizes my-linode-config
salt-cloud -f avail_sizes my-linode-config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L162-L185
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made; used by _query() for
# coarse (one call per second) client-side rate limiting.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric 'code' returned by the API and
# a human-readable 'descr'. NOTE(review): presumably consumed by the
# _get_status_descr_by_id / _get_status_id_by_name helpers defined elsewhere
# in this file -- confirm before renaming keys (the 'beeing_created' typo is
# a live dict key and must not be fixed casually).
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
# Name under which the salt-cloud loader exposes this driver.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this driver when a Linode provider is configured.
    '''
    # is_provider_configured() returns the provider dict, or False.
    return __virtualname__ if get_configured_provider() is not False else False
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def avail_images(call=None):
    '''
    Return the Linode distributions that can be used as images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    distros = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its human-readable label.
    return {distro['LABEL']: distro for distro in distros}
def avail_locations(call=None):
    '''
    Return the available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its human-readable location name.
    return {datacenter['LOCATION']: datacenter for datacenter in datacenters}
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.
    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.
    config_id
        The ID of the Config to boot. Required.
    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Setting
        ``check_running`` to False is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10

    Returns True on success, False if the boot job fails; raises
    SaltCloudSystemExit on invalid arguments or an already-running Linode.
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUGFIX: STATUS is numeric in the API response (the rest of this
        # module does int(node['STATUS'])), so the previous comparison
        # against the string '1' could never match and the guard was a no-op.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory for the linode.clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The salt-cloud profile/VM definition dict. Must contain at least
        ``name`` and either ``image`` or ``clonefrom``.

    Returns the bootstrap result dict (augmented with id/ip/state data) on
    success, or False when validation or the initial API call fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    # A 'clonefrom' entry in the profile switches us into clone mode.
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        # Fall back to the clone source's size/location when not given.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # The API can also report failure inline instead of raising.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # NOTE(review): a failed wait is only logged, not fatal -- the create
    # proceeds regardless; confirm this is intentional.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already carries a configuration profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode; check_running=False because the new VM cannot be
    # running yet and the check would waste an API call.
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )
    # Build the ordered comma-separated DiskList the API expects.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the configured distribution.

    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The swap size, in MB, used to compute the remaining root disk size.

    Raises SaltCloudConfigError when no root password is configured.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    # An SSH public key is optional; a root password is mandatory.
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    # Root disk gets whatever space is left after subtracting swap.
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.
    linode_id
        The ID of the Linode to create the swap disk for.
    swap_size
        The size of the disk, in MB. Falls back to the profile's ``swap``
        setting when not given.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # NOTE(review): skipChecks presumably tells the API to delete even when
    # disks are still attached -- confirm against the linode.delete docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Keep salt-cloud's minion cache in sync with the deletion.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # NOTE(review): only the first configuration profile is returned --
    # assumes the Linode has exactly one config (true for VMs this driver
    # creates); confirm for externally-created Linodes.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
        Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for. An explicit ``disk_size`` setting
        wins; otherwise total disk minus swap is used.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever is left after the
    root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    return total_hd - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for; its ``image`` setting is
        matched against the distribution labels from the API.

    Raises SaltCloudNotFound (listing the valid labels) on no match.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # BUGFIX: use pprint.pformat (returns the formatted string). The old
    # code called pprint.pprint, which prints to stdout and returns None,
    # so the error message always interpolated 'None' instead of the list.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, a flat ``{'public_ips': [...], 'private_ips': [...]}`` dict is
        returned instead of one keyed by Linode ID.
    '''
    query_args = {'LinodeID': linode_id} if linode_id else None
    ip_records = _query('linode', 'ip.list', args=query_args)['DATA']
    ret = {}
    for record in ip_records:
        node_id = six.text_type(record['LINODEID'])
        bucket = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        node_ips = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_ips[bucket].append(record['IPADDRESS'])
    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            flattened['private_ips'] = matching_id['private_ips']
            flattened['public_ips'] = matching_id['public_ips']
        ret = flattened
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    # linode_id wins when both are supplied; name costs one extra API call.
    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)
    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name (label) of the Linode from which to get the Linode ID.
        Required. Raises SaltCloudNotFound when no Linode carries the label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from. The ``password``
        setting wins; the legacy ``passwd`` setting is the fallback.
    '''
    legacy_password = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_password,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label; raises
    SaltCloudException for labels that cannot be mapped to a known plan.
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # BUGFIX: int() raises ValueError (not TypeError) on a
                # non-numeric string, so the old handler never fired and the
                # traceback leaked instead of this graceful fallback.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # BUGFIX: use floor division so e.g. 2048 becomes 'Linode 2GB'.
            # Under Python 3, true division produced 2.0 and the resulting
            # 'Linode 2.0GB' never matched an API label.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUGFIX: the old message misnamed this function as 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # Translate legacy labels such as 'Linode 1024' to the current format.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    assign_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return assign_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    allocate_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return allocate_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from (defaults to 128 MB).
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy labels look like 'Linode 1024'; strip the prefix so the
    # remainder can be compared against the RAM figure.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): int(vm_size) raises ValueError for current-style labels
    # such as '2GB' instead of reaching SaltCloudNotFound -- confirm whether
    # callers only pass legacy numeric labels here.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    brief_listing = _list_linodes(full=False)
    return brief_listing
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    full_listing = _list_linodes(full=True)
    return full_listing
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUGFIX: the old message misnamed this action as 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job finishes (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    # Translate the numeric API status into its human-readable description.
    state = int(node_data['STATUS'])
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUGFIX: the old message misnamed this function as 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive day/week/year figures from the API's hourly and monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 1 == 'Running' (see LINODE_STATUS).
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job completes before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 2 == 'Powered Off' (see LINODE_STATUS).
    if node['STATUS'] == 2:
        # 'action' key added for parity with start()'s already-running branch.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job completes before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUGFIX: honor the documented default. The old code called
    # update_args.update(...) unconditionally and raised AttributeError
    # whenever the caller relied on update_args=None.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that fetches every Linode on the account and formats it into the
    salt-cloud node structure. With ``full=True`` the raw API record is
    attached under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()
    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        node_ips = all_ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject (e.g. ``linode`` or ``avail``); combined with
        ``command`` to form the ``api_action`` query parameter.

    command
        The API method under ``action`` (e.g. ``list``, ``boot``).

    args
        Extra query parameters. The API key and ``api_action`` are injected
        here unless the caller already supplied them.

    method
        HTTP verb; defaults to GET.

    header_dict
        Extra HTTP headers to send.

    data
        Request body, if any.

    url
        Base URL of the Linode API endpoint.
    '''
    # LASTCALL holds the epoch second of the previous API call and is used
    # for crude client-side rate limiting below.
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials and the api_action unless already provided.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # If another query already ran within this same second, sleep for the
    # configured ratelimit interval before continuing.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    # A bad/expired API key is fatal; bail out immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success, or give up.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The total amount of time, in seconds, to wait.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # The most recent job is first; success is HOST_SUCCESS == 1.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the given Linode reaches the requested status code.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for. Defaults to the 'brand_new' code.

    timeout
        The total amount of time, in seconds, to wait.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        current = get_linode(kwargs={'linode_id': linode_id})
        if current['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(current['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a numeric Linode status ID.

    status_id
        linode VM status ID
    '''
    wanted = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == wanted:
            return status_data['descr']
    # No numeric match: fall back to a direct key lookup. The table is
    # keyed by name, so this effectively yields None for unknown codes.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # A valid label is 3-48 characters long and matches the pattern above.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface to SSH into: 'public_ips' (the default) or
    'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
boot
|
python
|
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    # When invoked as a function (-f), name may come from kwargs instead of
    # the positional argument, and config_id is mandatory.
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )
    # linode_item is only used for human-readable log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): compares against the string '1' (running) — assumes
        # the API returns STATUS as a string here; confirm against get_linode.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
|
Boot a Linode.
name
The name of the Linode to boot. Can be used instead of ``linode_id``.
linode_id
The ID of the Linode to boot. If provided, will be used as an
alternative to ``name`` and reduces the number of API calls to
Linode by one. Will be preferred over ``name``.
config_id
The ID of the Config to boot. Required.
check_running
Defaults to True. If set to False, overrides the call to check if
the VM is running before calling the linode.boot API call. Change
``check_running`` to True is useful during the boot call in the
create function, since the new VM will not be running yet.
Can be called as an action (which requires a name):
.. code-block:: bash
salt-cloud -a boot my-instance config_id=10
...or as a function (which requires either a name or linode_id):
.. code-block:: bash
salt-cloud -f boot my-linode-config name=my-instance config_id=10
salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L188-L272
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n",
"def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):\n '''\n Wait for a Job to return.\n\n linode_id\n The ID of the Linode to wait on. Required.\n\n job_id\n The ID of the job to wait for.\n\n timeout\n The amount of time to wait for a status to update.\n\n quiet\n Log status updates to debug logs when True. Otherwise, logs to info.\n '''\n interval = 5\n iterations = int(timeout / interval)\n\n for i in range(0, iterations):\n jobs_result = _query('linode',\n 'job.list',\n args={'LinodeID': linode_id})['DATA']\n if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:\n return True\n\n time.sleep(interval)\n log.log(\n logging.INFO if not quiet else logging.DEBUG,\n 'Still waiting on Job %s for Linode %s.', job_id, linode_id\n )\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider is configured.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, or False if the
    required 'apikey' and 'password' settings are missing.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distros = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its human-readable label.
    return {distro['LABEL']: distro for distro in distros}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its location name.
    return {dc['LOCATION']: dc for dc in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: this error message previously named 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    Handles both a fresh create (from a distribution image) and cloning an
    existing Linode (when the profile defines ``clonefrom``), then boots the
    instance and bootstraps it with salt.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Reject names that do not fit Linode's labeling constraints early.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    # Resolve the plan (size) and datacenter (location) from the profile.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/plan/datacenter from the clone source when the
        # profile did not specify them explicitly.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # The API can also report failure inside a 200 response.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Note: failure to reach 'brand_new' is logged but does not abort.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    if data_disk_id is None:
        disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    else:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
        'rootPass': root_password,
    }

    # An SSH public key is optional.
    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name']+"_data",
        'Type': 'ext4',
        'Size': data_size,
    })

    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Add a private IP address to the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks tells the API to delete even with disks still attached.
    response = _query('linode', 'delete',
                      args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name, __active_provider_name__.split(':')[0], __opts__
        )

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']

    # The first (and typically only) configuration profile is returned.
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default to everything left over after the swap allocation.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever remains after the
    root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # Fixed: pprint.pprint() prints to stdout and returns None, which made
    # this message end in "available:\nNone". pformat returns the string.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses, keyed by Linode ID.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_records = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_records = _query('linode', 'ip.list')['DATA']

    ret = {}
    for record in ip_records:
        owner = six.text_type(record['LINODEID'])
        bucket = ret.setdefault(owner, {'public_ips': [], 'private_ips': []})
        if record['ISPUBLIC'] == 1:
            bucket['public_ips'].append(record['IPADDRESS'])
        else:
            bucket['private_ips'].append(record['IPADDRESS'])

    # When a specific Linode was requested, flatten the result to just its
    # public/private lists instead of keying by Linode ID.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flattened['private_ips'] = match['private_ips']
            flattened['public_ips'] = match['public_ips']
        ret = flattened

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        # Resolving by name costs one extra API call.
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password configured for a VM.

    vm\_
        The configuration to obtain the password from. ``password`` is
        preferred; ``passwd`` is accepted as a fallback spelling.
    '''
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label into the format
    found in Linode API output, and returns the matching PLANID.

    label
        The label, or name, of the plan to decode.

    Example:
        ``Linode 2048`` will decode to ``Linode 2GB``

    Raises SaltCloudException when the label cannot be mapped to a known
    plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Looks like a new-style label already, but it is unknown.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )

        plan = label.split()
        if len(plan) != 2:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )

        plan_type = plan[0]
        try:
            # Old-style labels carry the size in MB, e.g. 'Linode 2048'.
            plan_size = int(plan[1])
        except (TypeError, ValueError):
            # BUGFIX: int() raises ValueError on a non-numeric string; the
            # previous 'except TypeError' never caught that case.
            plan_size = 0
            log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

        if plan_type == 'Linode' and plan_size == 1024:
            plan_type = 'Nanode'

        # BUGFIX: use integer division. Plain '/' yields a float on
        # Python 3, producing labels like 'Linode 2.0GB' which never
        # match the API's 'Linode 2GB' labels.
        plan_size = plan_size // 1024
        new_label = "{} {}GB".format(plan_type, plan_size)

        if new_label not in sizes:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
            )

        log.warning(
            'An outdated Linode plan label was detected in your Cloud '
            'Profile (%s). Please update the profile to use the new '
            'label format (%s) for the requested Linode plan size.',
            label, new_label
        )

        label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID for a given plan label.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        raise SaltCloudException(
            # BUGFIX: the message previously referenced 'show_instance'.
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Normalizing the label also resolves it to the numeric plan ID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested for the VM via the
    ``assign_private_ip`` profile setting; defaults to False.
    '''
    requested = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return requested
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested via the
    ``allocate_data_disk`` profile setting; defaults to False.

    .. versionadded:: 2016.3.0
    '''
    requested = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return requested
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM.

    vm\_
        The configuration to obtain the ``ssh_pubkey`` value from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB.

    vm\_
        The VM profile to obtain the ``swap`` setting from; defaults
        to 128 MB when unset.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Return the RAM size (in MB) of the VM's configured plan.

    vm\_
        The VM profile to read the ``size`` setting from.

    Raises SaltCloudNotFound when the numeric part of the size label does
    not match the plan's RAM value.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Strip the 'Linode ' prefix so the remainder can be compared
    # numerically against the plan's RAM value.
    numeric_part = size_label
    if numeric_part.startswith('Linode'):
        numeric_part = numeric_part.replace('Linode ', '')

    if ram != int(numeric_part):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(numeric_part)
        )
    return ram
def list_nodes(call=None):
    '''
    Return a brief listing of the linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared formatter without the raw 'extra' payload.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    Return the linodes on the account with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared formatter, including the raw 'extra' payload.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a minimal listing of the VMs on the provider: only each VM's
    name, ID and state. This is the least information needed to check for
    existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    nodes = _query('linode', 'list')['DATA']
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs on the provider, restricted to the fields
    chosen by the ``query.selection`` option.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            # BUGFIX: the message previously referenced 'show_instance'.
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    details = get_linode(kwargs={'linode_id': linode_id})
    ips = get_ips(linode_id)

    return {
        'id': details['LINODEID'],
        'image': details['DISTRIBUTIONVENDOR'],
        'name': details['LABEL'],
        'size': details['TOTALRAM'],
        'state': _get_status_descr_by_id(int(details['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        raise SaltCloudException(
            # BUGFIX: the message previously referenced 'show_instance'.
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly figures from the API's hourly and
    # monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS code 1 means 'Running' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Block until the boot job finishes; report failure on timeout.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS code 2 means 'Powered Off' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 2:
        return {'success': True,
                # 'action' key added for parity with start()'s early return.
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Block until the shutdown job finishes; report failure on timeout.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUGFIX: the default of None previously crashed with AttributeError
    # on update_args.update(); fall back to an empty dict instead.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that formats Linode API node data into salt-cloud's shape.

    full
        When True, attach the raw API node payload under an 'extra' key.
    '''
    all_ips = get_ips()
    ret = {}

    for node in _query('linode', 'list')['DATA']:
        node_id = six.text_type(node['LINODEID'])
        entry = {
            'id': node_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # IP details are only attached when get_ips() knows the node.
        ip_info = all_ips.get(node_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject area, e.g. ``linode`` or ``avail``.

    command
        The operation within that subject area, e.g. ``list``. Combined
        with ``action`` to form the ``api_action`` query parameter.

    args
        Query parameters to send. The provider's API key and the
        ``api_action`` value are injected automatically when absent.

    method
        HTTP method to use. Non-POST requests ask for JSON; DELETE
        responses are not decoded.

    header_dict
        Extra HTTP headers to send.

    data
        Request body, if any.

    url
        Base URL of the Linode API endpoint.

    Returns the decoded response dictionary. Raises SaltCloudSystemExit
    on an invalid API key and SaltCloudException for any other API error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Simple client-side rate limiting: if another query landed within
    # the same second (tracked via module-global LASTCALL), sleep for the
    # configured ratelimit interval before issuing this request.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep the API key and root password out of the debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    # A bad/expired key is fatal for the entire run.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    # Record the epoch of this call for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success or the timeout
    elapses.

    linode_id
        The ID of the Linode the job belongs to. Required.

    job_id
        The ID of the job to wait for.

    timeout
        Maximum number of seconds to keep polling.

    quiet
        Log progress at debug level when True, info level otherwise.

    Returns True when the job reports success, False on timeout.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the node reaches the given status code or the
    timeout elapses.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for. Defaults to the 'brand_new' code.

    timeout
        Maximum number of seconds to keep polling.

    quiet
        Log progress at debug level when True, info level otherwise.

    Returns True when the status is reached, False on timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        The numeric Linode VM status ID.
    '''
    wanted = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == wanted:
            return status_data['descr']
    # No code matched; fall back to a direct key lookup. Since the table
    # is keyed by name, an unknown numeric ID yields None here.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric status code for an internal Linode status name.

    status_name
        The internal Linode VM status name, e.g. 'running'.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate.
    '''
    name = six.text_type(name)
    # Labels must start and end with an alphanumeric character; dashes
    # and underscores are allowed in between.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to connect to over SSH: either
    'public_ips' (the default) or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips', search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
clone
|
python
|
def clone(kwargs=None, call=None):
'''
Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
'''
if call == 'action':
raise SaltCloudSystemExit(
'The clone function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
linode_id = kwargs.get('linode_id', None)
datacenter_id = kwargs.get('datacenter_id', None)
plan_id = kwargs.get('plan_id', None)
required_params = [linode_id, datacenter_id, plan_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The clone function requires a \'linode_id\', \'datacenter_id\', '
'and \'plan_id\' to be provided.'
)
clone_args = {
'LinodeID': linode_id,
'DatacenterID': datacenter_id,
'PlanID': plan_id
}
return _query('linode', 'clone', args=clone_args)
|
Clone a Linode.
linode_id
The ID of the Linode to clone. Required.
datacenter_id
The ID of the Datacenter where the Linode will be placed. Required.
plan_id
The ID of the plan (size) of the Linode. Required.
CLI Example:
.. code-block:: bash
salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L275-L320
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made. Read and updated by
# _query() for simple client-side rate limiting between API calls.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric 'code' returned by the API
# and a human-readable 'descr'. NOTE: the 'beeing_created' key is a
# long-standing typo kept as-is because it is a lookup key at runtime.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# The name salt-cloud uses to refer to this driver.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this driver when a Linode provider is configured.
    '''
    if get_configured_provider() is not False:
        return __virtualname__
    return False
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, or False when
    the required 'apikey' and 'password' settings are absent.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(
        __opts__, provider_name, ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return the Linode distributions available as images, keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return the available Linode datacenter locations, keyed by location.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # BUGFIX: the message previously referenced 'avail_locations'.
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUGFIX: STATUS is numeric (compare start()/show_instance()); the
        # old comparison against the string '1' could never be True, so
        # the already-running guard was silently skipped.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dictionary (name, size, image, location, and
        optionally ``clonefrom``) to provision from. Mutated in place:
        ``ssh_host``, ``password``, ``public_ips`` and ``private_ips``
        are set before bootstrapping.

    Returns the bootstrap result dict on success, or False on any
    validation or API failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Reject names that do not meet Linode's labeling rules up front.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source node
        # unless the profile overrides them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API can also report errors inline in an otherwise-successful
    # response; bail out on the first one.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # NOTE(review): a timeout here only logs an error and then continues
    # with provisioning — presumably deliberate best-effort; confirm.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has a configuration profile to reuse.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode (check_running=False: the fresh node is not up yet).
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)

    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # Comma-separated disk list: root,swap[,data]
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk) for disk in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    args = {}

    pub_key = get_pub_key(vm_)
    if pub_key:
        args['rootSSHKey'] = pub_key

    # A root password is mandatory for the Linode API.
    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    args['rootPass'] = root_password

    args['LinodeID'] = linode_id
    args['DistributionID'] = get_distribution_id(vm_)
    args['Label'] = vm_['name']
    # Root disk gets everything that isn't reserved for swap.
    args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    return _clean_data(
        _query('linode', 'disk.createfromdistribution', args=args)
    )
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.  Falls back to the profile's
        configured swap size when not supplied.
    '''
    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size or get_swap_size(vm_),
    }

    return _clean_data(_query('linode', 'disk.create', args=args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + '_data',
        'Type': 'ext4',
        'Size': data_size,
    }

    return _clean_data(_query('linode', 'disk.create', args=args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Announce the pending destruction on the salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # NOTE(review): 'skipChecks' presumably bypasses the API's deletion
    # safety checks (e.g. attached disks) -- confirm against Linode API docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    # Announce completion on the salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Remove this minion's entry from the salt-cloud cache dir, if enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    # Resolving by name costs one extra API round-trip.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile's ID is returned.
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default to the whole drive minus the swap allocation.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever remains after the root and swap disks are carved out.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's image label does not match
    any distribution returned by the Linode API.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        # Match the profile's image label against Linode's labels.
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match: list what IS available so the user can fix the profile.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            # BUGFIX: pprint.pprint() writes to stdout and returns None,
            # which made this message render the literal string 'None'.
            # pprint.pformat() returns the formatted text.
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        records = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        records = _query('linode', 'ip.list')

    ret = {}
    for record in records['DATA']:
        node_id = six.text_type(record['LINODEID'])
        bucket = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        node_entry = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_entry[bucket].append(record['IPADDRESS'])

    # If linode_id was specified, collapse the per-node mapping down to just
    # that node's public/private lists.
    if linode_id:
        collapsed = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            collapsed['private_ips'] = match['private_ips']
            collapsed['public_ips'] = match['public_ips']
        ret = collapsed

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                "The get_linode function requires either a 'name' or a 'linode_id'."
            )
        # No ID given: resolve it from the name (extra API call).
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    Prefers the 'password' key, falling back to the legacy 'passwd' key.

    vm\_
        The configuration to obtain the password from.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already looks like a new-style label but isn't a known plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUGFIX: int() raises ValueError for non-numeric strings;
                # the original `except TypeError` alone could never catch it.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # BUGFIX: use floor division -- true division on Python 3
            # yields e.g. 2.0, producing 'Linode 2.0GB' which never
            # matches the API labels.  Identical to py2 int division.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUGFIX: the message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels (e.g. 'Linode 2048') to the current API
    # format and resolve them to the numeric plan ID.
    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    requested = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return requested
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # 128 MB is the driver's default swap allocation.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Legacy labels look like 'Linode 1024'; strip the prefix so the
    # remainder can be compared numerically against the plan's RAM.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: full=False is the helper's default.
    return _list_linodes()
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    full = True
    return _list_linodes(full=full)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Only the name, ID, and state are extracted from each API record.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUGFIX: the message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job completes (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUGFIX: the message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Daily/weekly/yearly figures are derived; hourly and monthly come
    # straight from the API response.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Already running -- report success without issuing a boot job.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to finish before reporting the result.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Already stopped -- report success without issuing a shutdown job.
    if node['STATUS'] == 2:
        return {'success': True,
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to finish before reporting the result.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}

    return {'action': 'stop',
            'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUGFIX: calling with the documented default (update_args=None)
    # previously crashed with AttributeError on None.update().
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # Attach the node's IPs when get_ips() returned an entry for it.
        node_ips = ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    # Optional delay (seconds) between consecutive API calls, used to stay
    # under Linode's rate limits.
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    # The Linode v3 API multiplexes everything on the 'api_action'
    # parameter, e.g. 'linode.list'.
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Throttle: if the previous call happened within the current second,
    # sleep for the configured ratelimit interval before hitting the API.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials are fatal -- abort immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    # Record the completion time for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    for _ in range(0, int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # The most recent job is first; success when it matches and the
        # host reports it finished successfully.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    waiting_for = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(0, int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            "Status for Linode %s is '%s', waiting for '%s'.",
            linode_id, _get_status_descr_by_id(node['STATUS']), waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    # Scan the LINODE_STATUS table for an entry whose numeric code matches.
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # NOTE(review): LINODE_STATUS appears keyed by status *names* (see
    # _get_status_id_by_name), so looking up the numeric status_id here
    # presumably always yields None -- confirm whether a plain
    # `return None` was intended.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Labels must be 3-48 characters long and match the allowed pattern.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create
|
python
|
def create(vm_):
    '''
    Create a single Linode VM.

    Orchestrates the full provisioning flow: profile validation, optional
    cloning, Linode creation, disk/config setup, boot, and salt bootstrap.
    Returns the bootstrap result dict, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Bail out early if the requested label would be rejected by Linode.
    if _validate_name(name) is False:
        return False

    # Announce the creation on the salt event bus.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        # Inherit size/plan from the clone source when not given explicitly.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

        # The API may also report errors in-band rather than raising.
        if 'ERRORARRAY' in result:
            for error_data in result['ERRORARRAY']:
                log.error(
                    'Error creating %s on Linode\n\n'
                    'The Linode API returned the following: %s\n',
                    name, error_data['ERRORMESSAGE']
                )
                return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for the new Linode to reach its initial ('brand_new') status.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Clones carry their configuration over from the source.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
|
Create a single Linode VM.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L323-L537
|
[
"def clone(kwargs=None, call=None):\n '''\n Clone a Linode.\n\n linode_id\n The ID of the Linode to clone. Required.\n\n datacenter_id\n The ID of the Datacenter where the Linode will be placed. Required.\n\n plan_id\n The ID of the plan (size) of the Linode. Required.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The clone function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n linode_id = kwargs.get('linode_id', None)\n datacenter_id = kwargs.get('datacenter_id', None)\n plan_id = kwargs.get('plan_id', None)\n required_params = [linode_id, datacenter_id, plan_id]\n\n for item in required_params:\n if item is None:\n raise SaltCloudSystemExit(\n 'The clone function requires a \\'linode_id\\', \\'datacenter_id\\', '\n 'and \\'plan_id\\' to be provided.'\n )\n\n clone_args = {\n 'LinodeID': linode_id,\n 'DatacenterID': datacenter_id,\n 'PlanID': plan_id\n }\n\n return _query('linode', 'clone', args=clone_args)\n",
"def create_config(kwargs=None, call=None):\n '''\n Creates a Linode Configuration Profile.\n\n name\n The name of the VM to create the config for.\n\n linode_id\n The ID of the Linode to create the configuration for.\n\n root_disk_id\n The Root Disk ID to be used for this config.\n\n swap_disk_id\n The Swap Disk ID to be used for this config.\n\n data_disk_id\n The Data Disk ID to be used for this config.\n\n .. versionadded:: 2016.3.0\n\n kernel_id\n The ID of the kernel to use for this configuration profile.\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The create_config function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n root_disk_id = kwargs.get('root_disk_id', None)\n swap_disk_id = kwargs.get('swap_disk_id', None)\n data_disk_id = kwargs.get('data_disk_id', None)\n kernel_id = kwargs.get('kernel_id', None)\n\n if kernel_id is None:\n # 138 appears to always be the latest 64-bit kernel for Linux\n kernel_id = 138\n\n required_params = [name, linode_id, root_disk_id, swap_disk_id]\n for item in required_params:\n if item is None:\n raise SaltCloudSystemExit(\n 'The create_config functions requires a \\'name\\', \\'linode_id\\', '\n '\\'root_disk_id\\', and \\'swap_disk_id\\'.'\n )\n\n disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)\n if data_disk_id is not None:\n disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)\n\n config_args = {'LinodeID': linode_id,\n 'KernelID': kernel_id,\n 'Label': name,\n 'DiskList': disklist\n }\n\n result = _query('linode', 'config.create', args=config_args)\n\n return _clean_data(result)\n",
"def is_profile_configured(opts, provider, profile_name, vm_=None):\n '''\n Check if the requested profile contains the minimum required parameters for\n a profile.\n\n Required parameters include image and provider for all drivers, while some\n drivers also require size keys.\n\n .. versionadded:: 2015.8.0\n '''\n # Standard dict keys required by all drivers.\n required_keys = ['provider']\n alias, driver = provider.split(':')\n\n # Most drivers need an image to be specified, but some do not.\n non_image_drivers = ['nova', 'virtualbox', 'libvirt', 'softlayer', 'oneandone', 'profitbricks']\n\n # Most drivers need a size, but some do not.\n non_size_drivers = ['opennebula', 'parallels', 'proxmox', 'scaleway',\n 'softlayer', 'softlayer_hw', 'vmware', 'vsphere',\n 'virtualbox', 'libvirt', 'oneandone', 'profitbricks']\n\n provider_key = opts['providers'][alias][driver]\n profile_key = opts['providers'][alias][driver]['profiles'][profile_name]\n\n # If cloning on Linode, size and image are not necessary.\n # They are obtained from the to-be-cloned VM.\n if driver == 'linode' and profile_key.get('clonefrom', False):\n non_image_drivers.append('linode')\n non_size_drivers.append('linode')\n elif driver == 'gce' and 'sourceImage' in six.text_type(vm_.get('ex_disks_gce_struct')):\n non_image_drivers.append('gce')\n\n # If cloning on VMware, specifying image is not necessary.\n if driver == 'vmware' and 'image' not in list(profile_key.keys()):\n non_image_drivers.append('vmware')\n\n if driver not in non_image_drivers:\n required_keys.append('image')\n if driver == 'vmware':\n required_keys.append('datastore')\n elif driver in ['linode', 'virtualbox']:\n required_keys.append('clonefrom')\n elif driver == 'nova':\n nova_image_keys = ['image', 'block_device_mapping', 'block_device', 'boot_volume']\n if not any([key in provider_key for key in nova_image_keys]) and not any([key in profile_key for key in nova_image_keys]):\n required_keys.extend(nova_image_keys)\n\n if driver not 
in non_size_drivers:\n required_keys.append('size')\n\n # Check if required fields are supplied in the provider config. If they\n # are present, remove it from the required_keys list.\n for item in list(required_keys):\n if item in provider_key:\n required_keys.remove(item)\n\n # If a vm_ dict was passed in, use that information to get any other configs\n # that we might have missed thus far, such as a option provided in a map file.\n if vm_:\n for item in list(required_keys):\n if item in vm_:\n required_keys.remove(item)\n\n # Check for remaining required parameters in the profile config.\n for item in required_keys:\n if profile_key.get(item, None) is None:\n # There's at least one required configuration item which is not set.\n log.error(\n \"The required '%s' configuration setting is missing from \"\n \"the '%s' profile, which is configured under the '%s' alias.\",\n item, profile_name, alias\n )\n return False\n\n return True\n",
"def boot(name=None, kwargs=None, call=None):\n '''\n Boot a Linode.\n\n name\n The name of the Linode to boot. Can be used instead of ``linode_id``.\n\n linode_id\n The ID of the Linode to boot. If provided, will be used as an\n alternative to ``name`` and reduces the number of API calls to\n Linode by one. Will be preferred over ``name``.\n\n config_id\n The ID of the Config to boot. Required.\n\n check_running\n Defaults to True. If set to False, overrides the call to check if\n the VM is running before calling the linode.boot API call. Change\n ``check_running`` to True is useful during the boot call in the\n create function, since the new VM will not be running yet.\n\n Can be called as an action (which requires a name):\n\n .. code-block:: bash\n\n salt-cloud -a boot my-instance config_id=10\n\n ...or as a function (which requires either a name or linode_id):\n\n .. code-block:: bash\n\n salt-cloud -f boot my-linode-config name=my-instance config_id=10\n salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10\n '''\n if name is None and call == 'action':\n raise SaltCloudSystemExit(\n 'The boot action requires a \\'name\\'.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n linode_id = kwargs.get('linode_id', None)\n config_id = kwargs.get('config_id', None)\n check_running = kwargs.get('check_running', True)\n\n if call == 'function':\n name = kwargs.get('name', None)\n\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The boot function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if config_id is None:\n raise SaltCloudSystemExit(\n 'The boot function requires a \\'config_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n linode_item = name\n else:\n linode_item = linode_id\n\n # Check if Linode is running first\n if check_running is True:\n status = get_linode(kwargs={'linode_id': linode_id})['STATUS']\n if status == '1':\n raise SaltCloudSystemExit(\n 'Cannot boot Linode {0}. 
'\n 'Linode {0} is already running.'.format(linode_item)\n )\n\n # Boot the VM and get the JobID from Linode\n response = _query('linode', 'boot',\n args={'LinodeID': linode_id,\n 'ConfigID': config_id})['DATA']\n boot_job_id = response['JobID']\n\n if not _wait_for_job(linode_id, boot_job_id):\n log.error('Boot failed for Linode %s.', linode_item)\n return False\n\n return True\n",
"def get_password(vm_):\n r'''\n Return the password to use for a VM.\n\n vm\\_\n The configuration to obtain the password from.\n '''\n return config.get_cloud_config_value(\n 'password', vm_, __opts__,\n default=config.get_cloud_config_value(\n 'passwd', vm_, __opts__,\n search_global=False\n ),\n search_global=False\n )\n",
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n",
"def _validate_name(name):\n '''\n Checks if the provided name fits Linode's labeling parameters.\n\n .. versionadded:: 2015.5.6\n\n name\n The VM name to validate\n '''\n name = six.text_type(name)\n name_length = len(name)\n regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')\n\n if name_length < 3 or name_length > 48:\n ret = False\n elif not re.match(regex, name):\n ret = False\n else:\n ret = True\n\n if ret is False:\n log.warning(\n 'A Linode label may only contain ASCII letters or numbers, dashes, and '\n 'underscores, must begin and end with letters or numbers, and be at least '\n 'three characters in length.'\n )\n\n return ret\n",
"def get_plan_id(kwargs=None, call=None):\n '''\n Returns the Linode Plan ID.\n\n label\n The label, or name, of the plan to get the ID from.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_plan_id linode label=\"Linode 1024\"\n '''\n if call == 'action':\n raise SaltCloudException(\n 'The show_instance action must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n label = kwargs.get('label', None)\n if label is None:\n raise SaltCloudException(\n 'The get_plan_id function requires a \\'label\\'.'\n )\n\n label = _decode_linode_plan_label(label)\n\n return label\n",
"def get_datacenter_id(location):\n '''\n Returns the Linode Datacenter ID.\n\n location\n The location, or name, of the datacenter to get the ID from.\n '''\n\n return avail_locations()[location]['DATACENTERID']\n",
"def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):\n '''\n Wait for a certain status from Linode.\n\n linode_id\n The ID of the Linode to wait on. Required.\n\n status\n The status to look for to update.\n\n timeout\n The amount of time to wait for a status to update.\n\n quiet\n Log status updates to debug logs when False. Otherwise, logs to info.\n '''\n if status is None:\n status = _get_status_id_by_name('brand_new')\n\n status_desc_waiting = _get_status_descr_by_id(status)\n\n interval = 5\n iterations = int(timeout / interval)\n\n for i in range(0, iterations):\n result = get_linode(kwargs={'linode_id': linode_id})\n\n if result['STATUS'] == status:\n return True\n\n status_desc_result = _get_status_descr_by_id(result['STATUS'])\n\n time.sleep(interval)\n log.log(\n logging.INFO if not quiet else logging.DEBUG,\n 'Status for Linode %s is \\'%s\\', waiting for \\'%s\\'.',\n linode_id, status_desc_result, status_desc_waiting\n )\n\n return False\n",
"def _get_status_id_by_name(status_name):\n '''\n Return linode status description by internalstatus name\n\n status_name\n internal linode VM status name\n '''\n return LINODE_STATUS.get(status_name, {}).get('code', None)\n",
"def update_linode(linode_id, update_args=None):\n '''\n Updates a Linode's properties.\n\n linode_id\n The ID of the Linode to shutdown. Required.\n\n update_args\n The args to update the Linode with. Must be in dictionary form.\n '''\n update_args.update({'LinodeID': linode_id})\n\n result = _query('linode', 'update', args=update_args)\n\n return _clean_data(result)\n",
"def get_private_ip(vm_):\n '''\n Return True if a private ip address is requested\n '''\n return config.get_cloud_config_value(\n 'assign_private_ip', vm_, __opts__, default=False\n )\n",
"def create_private_ip(linode_id):\n r'''\n Creates a private IP for the specified Linode.\n\n linode_id\n The ID of the Linode to create the IP address for.\n '''\n kwargs = {'LinodeID': linode_id}\n result = _query('linode', 'ip.addprivate', args=kwargs)\n\n return _clean_data(result)\n",
"def _get_ssh_interface(vm_):\n '''\n Return the ssh_interface type to connect to. Either 'public_ips' (default)\n or 'private_ips'.\n '''\n return config.get_cloud_config_value(\n 'ssh_interface', vm_, __opts__, default='public_ips',\n search_global=False\n )\n",
"def get_config_id(kwargs=None, call=None):\n '''\n Returns a config_id for a given linode.\n\n .. versionadded:: 2015.8.0\n\n name\n The name of the Linode for which to get the config_id. Can be used instead\n of ``linode_id``.h\n\n linode_id\n The ID of the Linode for which to get the config_id. Can be used instead\n of ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_config_id my-linode-config name=my-linode\n salt-cloud -f get_config_id my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudException(\n 'The get_config_id function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_config_id function requires either a \\'name\\' or a \\'linode_id\\' '\n 'to be provided.'\n )\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']\n config_id = {'config_id': response[0]['ConfigID']}\n\n return config_id\n",
"def create_disk_from_distro(vm_, linode_id, swap_size=None):\n r'''\n Creates the disk for the Linode from the distribution.\n\n vm\\_\n The VM profile to create the disk for.\n\n linode_id\n The ID of the Linode to create the distribution disk for. Required.\n\n swap_size\n The size of the disk, in MB.\n\n '''\n kwargs = {}\n\n if swap_size is None:\n swap_size = get_swap_size(vm_)\n\n pub_key = get_pub_key(vm_)\n root_password = get_password(vm_)\n\n if pub_key:\n kwargs.update({'rootSSHKey': pub_key})\n if root_password:\n kwargs.update({'rootPass': root_password})\n else:\n raise SaltCloudConfigError(\n 'The Linode driver requires a password.'\n )\n\n kwargs.update({'LinodeID': linode_id,\n 'DistributionID': get_distribution_id(vm_),\n 'Label': vm_['name'],\n 'Size': get_disk_size(vm_, swap_size, linode_id)})\n\n result = _query('linode', 'disk.createfromdistribution', args=kwargs)\n\n return _clean_data(result)\n",
"def create_swap_disk(vm_, linode_id, swap_size=None):\n r'''\n Creates the disk for the specified Linode.\n\n vm\\_\n The VM profile to create the swap disk for.\n\n linode_id\n The ID of the Linode to create the swap disk for.\n\n swap_size\n The size of the disk, in MB.\n '''\n kwargs = {}\n\n if not swap_size:\n swap_size = get_swap_size(vm_)\n\n kwargs.update({'LinodeID': linode_id,\n 'Label': vm_['name'],\n 'Type': 'swap',\n 'Size': swap_size\n })\n\n result = _query('linode', 'disk.create', args=kwargs)\n\n return _clean_data(result)\n",
"def get_ips(linode_id=None):\n '''\n Returns public and private IP addresses.\n\n linode_id\n Limits the IP addresses returned to the specified Linode ID.\n '''\n if linode_id:\n ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})\n else:\n ips = _query('linode', 'ip.list')\n\n ips = ips['DATA']\n ret = {}\n\n for item in ips:\n node_id = six.text_type(item['LINODEID'])\n if item['ISPUBLIC'] == 1:\n key = 'public_ips'\n else:\n key = 'private_ips'\n\n if ret.get(node_id) is None:\n ret.update({node_id: {'public_ips': [], 'private_ips': []}})\n ret[node_id][key].append(item['IPADDRESS'])\n\n # If linode_id was specified, only return the ips, and not the\n # dictionary based on the linode ID as a key.\n if linode_id:\n _all_ips = {'public_ips': [], 'private_ips': []}\n matching_id = ret.get(six.text_type(linode_id))\n if matching_id:\n _all_ips['private_ips'] = matching_id['private_ips']\n _all_ips['public_ips'] = matching_id['public_ips']\n\n ret = _all_ips\n\n return ret\n",
"def _get_status_descr_by_id(status_id):\n '''\n Return linode status by ID\n\n status_id\n linode VM status ID\n '''\n for status_name, status_data in six.iteritems(LINODE_STATUS):\n if status_data['code'] == int(status_id):\n return status_data['descr']\n return LINODE_STATUS.get(status_id, None)\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made; consulted by _query() to
# decide whether to sleep before the next Linode API call (rate limiting).
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the Linode API status code and a
# human-readable description; see _get_status_id_by_name() and
# _get_status_descr_by_id() for the lookups.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# The name under which this driver is exposed to salt-cloud.
__virtualname__ = 'linode'

# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider is configured.
    '''
    provider = get_configured_provider()
    return False if provider is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, or False if
    no provider with the required credentials is configured.
    '''
    # Fall back to the virtual name when no provider alias is active.
    active = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(
        __opts__,
        active,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images (distributions), keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    # Re-key the raw API response by each distribution's label.
    response = _query('avail', 'distributions')
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    # Re-key the raw API response by each datacenter's location string.
    response = _query('avail', 'datacenters')
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: the message previously named avail_locations by mistake.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # When invoked as a function (-f), the name can only arrive via kwargs.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # Prefer an explicit linode_id; otherwise resolve it from the name
    # (one extra API call). linode_item is only used in messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared against the string '1'
        # ('Running'); the API documents integer status codes — confirm
        # the response type before relying on this guard.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    # Block until the boot job finishes; _wait_for_job returns False on
    # timeout or job failure.
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # DiskList is an ordered, comma-separated string of disk IDs; the
    # optional data disk goes last.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the configured distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    root_password = get_password(vm_)
    if not root_password:
        # Linode requires a root password for distribution disks.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'rootPass': root_password,
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
    }

    # An SSH public key is optional.
    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    return _clean_data(_query('linode', 'disk.createfromdistribution', args=disk_args))
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        # Fall back to the profile-configured (or default) swap size.
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        # Data disks are labeled after the VM with a "_data" suffix.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    def _fire(label, tag):
        # Emit a salt-cloud lifecycle event on the event bus.
        __utils__['cloud.fire_event'](
            'event',
            label,
            'salt/cloud/{0}/{1}'.format(name, tag),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

    _fire('destroying instance', 'destroying')

    linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    _fire('destroyed instance', 'destroyed')

    # Drop the minion from the local cloud cache when caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile's ID is reported.
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # Raises KeyError if the location is not a known datacenter.
    datacenter = avail_locations()[location]
    return datacenter['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default to the Linode's total disk capacity minus the swap allocation.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever remains after the root and swap disks is the data disk.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's image label does not match
    any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match: include the available labels in the exception message.
    # Fixed: pprint.pprint() prints to stdout and returns None, which put
    # the literal string 'None' in the message; pprint.pformat() returns
    # the formatted string instead.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if entry['ISPUBLIC'] == 1:
            bucket['public_ips'].append(entry['IPADDRESS'])
        else:
            bucket['private_ips'].append(entry['IPADDRESS'])

    # When a single Linode was requested, return just its IP mapping
    # rather than a dictionary keyed by Linode ID.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        node = ret.get(six.text_type(linode_id))
        if node:
            flat['private_ips'] = node['private_ips']
            flat['public_ips'] = node['public_ips']
        return flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the data for a single named Linode.

    name
        The label of the Linode to look up. Requires an extra API call
        compared to ``linode_id``.

    linode_id
        The ID of the Linode to look up; preferred over ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the Linode whose label matches ``name``.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password configured for a VM.

    Checks the ``password`` setting first and falls back to the legacy
    ``passwd`` spelling.

    vm\_
        The configuration to obtain the password from.
    '''
    legacy_value = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_value,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used by the Linode API output, and return its PLANID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<Type> <N>GB" format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            # Bug fix: int() on a non-numeric string raises ValueError, not
            # TypeError, so the original handler never fired.
            except (TypeError, ValueError):
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # 1024 MB "Linode" plans were renamed to "Nanode" by Linode.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Bug fix: the message previously referenced the show_instance action.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True when a private IP address is requested in the VM profile.
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True when the profile requests a data disk.

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Return the RAM size, in MB, for the profile's ``size`` setting.

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Legacy labels look like "Linode 1024"; the numeric part must agree
    # with the RAM reported by the API for the plan.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name — a Linode API
        limitation.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    Return the Linodes on the account with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name — a Linode API
        limitation.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return only the names and states of the VMs on the provider — the
    minimum needed to check for existing instances.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return the VMs on the provider, restricted to the fields chosen by the
    ``query.selection`` option.
    '''
    full_listing = list_nodes_full()
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](full_listing, selection, call)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Bug fix: the error previously referenced the show_instance action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success; bail out on failure.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM, looked up by name.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name — a Linode API
        limitation.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the message previously referenced the show_instance action.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly figures from the API's hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 == Running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    boot_job = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    booted = _wait_for_job(node_id, boot_job['JobID'])

    if not booted:
        return {'action': 'start',
                'success': False}
    return {'state': 'Running',
            'action': 'start',
            'success': True}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == Powered Off; nothing to do.
    if node['STATUS'] == 2:
        return {'success': True,
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    shutdown_job = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    stopped = _wait_for_job(node_id, shutdown_job['JobID'])

    if not stopped:
        return {'action': 'stop',
                'success': False}
    return {'state': 'Stopped',
            'action': 'stop',
            'success': True}
def update_linode(linode_id, update_args=None):
    '''
    Update a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    update_args.update({'LinodeID': linode_id})
    return _clean_data(_query('linode', 'update', args=update_args))
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Query the account's Linodes and format them into a dict keyed by label.

    full
        When True, attach the raw API record under the ``extra`` key.
    '''
    ips = get_ips()

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        addresses = ips.get(linode_id)
        if addresses is not None:
            entry['private_ips'] = addresses['private_ips']
            entry['public_ips'] = addresses['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the ``api_action`` query parameter as
        ``<action>.<command>`` (e.g. ``linode.list``).

    args
        Query parameters; the provider's API key and the api_action are
        injected automatically when absent.

    Returns the decoded JSON body. Raises SaltCloudSystemExit on an expired
    or invalid API key, SaltCloudException for any other API-reported error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials and action only when the caller has not set them.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if a call already happened this second, sleep
    # for the configured ratelimit_sleep interval before issuing another.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    # Bad/expired key is fatal for the whole run.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    # Record the time of this successful call for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll the Linode job list until the given job succeeds or the timeout
    elapses. Returns True on success, False on timeout.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The number of seconds to keep polling.

    quiet
        Log progress at debug level when True, otherwise at info level.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode', 'job.list', args={'LinodeID': linode_id})['DATA']
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll a Linode until it reaches the requested status code, or give up
    after ``timeout`` seconds. Returns True on success, False on timeout.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to 'brand_new'.

    timeout
        The number of seconds to keep polling.

    quiet
        Log progress at debug level when True, otherwise at info level.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a numeric Linode status code.

    status_id
        linode VM status ID
    '''
    code = int(status_id)
    for entry in LINODE_STATUS.values():
        if entry['code'] == code:
            return entry['descr']
    # No entry matched; LINODE_STATUS is keyed by name, so for numeric
    # input this fallback yields None.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric status code for an internal Linode status name,
    or None when the name is unknown.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters:
    3-48 characters, ASCII letters/digits/dashes/underscores, beginning
    and ending with a letter or digit.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to SSH over: 'public_ips' (the default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create_config
|
python
|
def create_config(kwargs=None, call=None):
'''
Creates a Linode Configuration Profile.
name
The name of the VM to create the config for.
linode_id
The ID of the Linode to create the configuration for.
root_disk_id
The Root Disk ID to be used for this config.
swap_disk_id
The Swap Disk ID to be used for this config.
data_disk_id
The Data Disk ID to be used for this config.
.. versionadded:: 2016.3.0
kernel_id
The ID of the kernel to use for this configuration profile.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The create_config function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
linode_id = kwargs.get('linode_id', None)
root_disk_id = kwargs.get('root_disk_id', None)
swap_disk_id = kwargs.get('swap_disk_id', None)
data_disk_id = kwargs.get('data_disk_id', None)
kernel_id = kwargs.get('kernel_id', None)
if kernel_id is None:
# 138 appears to always be the latest 64-bit kernel for Linux
kernel_id = 138
required_params = [name, linode_id, root_disk_id, swap_disk_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The create_config functions requires a \'name\', \'linode_id\', '
'\'root_disk_id\', and \'swap_disk_id\'.'
)
disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
if data_disk_id is not None:
disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
config_args = {'LinodeID': linode_id,
'KernelID': kernel_id,
'Label': name,
'DiskList': disklist
}
result = _query('linode', 'config.create', args=config_args)
return _clean_data(result)
|
Creates a Linode Configuration Profile.
name
The name of the VM to create the config for.
linode_id
The ID of the Linode to create the configuration for.
root_disk_id
The Root Disk ID to be used for this config.
swap_disk_id
The Swap Disk ID to be used for this config.
data_disk_id
The Data Disk ID to be used for this config.
.. versionadded:: 2016.3.0
kernel_id
The ID of the kernel to use for this configuration profile.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L540-L603
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made; used by _query() for
# one-call-per-second rate limiting.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to its numeric API code and display text.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): 'beeing_created' looks like a typo for 'being_created',
    # but it is a lookup key — renaming it would change behavior for any
    # caller using the current spelling; confirm before changing.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs; return the virtual module name when the
    provider is configured, False otherwise.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Bug fix: the message previously referenced avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    # As an action, the positional name argument is mandatory.
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # As a function, name/linode_id come from kwargs instead.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used in log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    clone_args = {
        'LinodeID': kwargs.get('linode_id', None),
        'DatacenterID': kwargs.get('datacenter_id', None),
        'PlanID': kwargs.get('plan_id', None),
    }

    if any(value is None for value in clone_args.values()):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args=clone_args)
def create(vm_):
    '''
    Create a single Linode VM.

    Orchestrates the full provisioning flow: profile validation, (clone or
    fresh) instance creation, labeling, IP assignment, disk/config setup,
    boot, and Salt bootstrap. Returns the bootstrap result dict on success
    or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) from the profile, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the datacenter from the profile's location, if given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/datacenter from the source Linode when
        # not overridden by the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # API-level errors arrive embedded in the response rather than raised.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has disks and a config profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its configured distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    disk_args = {}
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        disk_args['rootSSHKey'] = pub_key
    if root_password:
        disk_args['rootPass'] = root_password
    else:
        # A root password is mandatory for disk creation.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args['LinodeID'] = linode_id
    disk_args['DistributionID'] = get_distribution_id(vm_)
    disk_args['Label'] = vm_['name']
    disk_args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create a swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the Linode (type is hardcoded to ext4 at the moment).

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Add a private IP address to the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroy a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    fire_event = __utils__['cloud.fire_event']

    fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)
    # skipChecks avoids the API refusing to delete a running Linode.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Return the config_id for a given Linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for the given location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default to all remaining space after the swap allocation.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is left after the root and swap disks.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No matching label found. Bug fix: use pprint.pformat here —
    # pprint.pprint prints to stdout and returns None, so the listing of
    # valid distributions never appeared in the exception message.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses, grouped per Linode ID.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        raw = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        raw = _query('linode', 'ip.list')

    ret = {}
    for item in raw['DATA']:
        node_id = six.text_type(item['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if item['ISPUBLIC'] == 1:
            bucket['public_ips'].append(item['IPADDRESS'])
        else:
            bucket['private_ips'].append(item['IPADDRESS'])

    if linode_id:
        # For a single Linode, flatten to just its IPs rather than a
        # node-id-keyed mapping.
        flat = {'public_ips': [], 'private_ips': []}
        matched = ret.get(six.text_type(linode_id))
        if matched:
            flat['private_ips'] = matched['private_ips']
            flat['public_ips'] = matched['public_ips']
        return flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``; note this induces an additional API call.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the Linode whose label matches ``name``.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is honored as a fallback alias for 'password'.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format used
    by the Linode API output, then return its PLANID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # Bug fix: int('foo') raises ValueError, not TypeError, so the
                # original except clause could never fire; catch both so a
                # malformed size falls through to the invalid-plan error.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Bug fix: use floor division so Python 3 yields '2GB' rather than
            # '2.0GB' (true division would never match an API label).
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Bug fix: the message previously referred to 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    r'''
    Return True when the profile requests a private IP address
    (``assign_private_ip``), False otherwise.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    r'''
    Return True when the profile requests a data disk
    (``allocate_data_disk``), False otherwise.

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM (``ssh_pubkey``).

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM in MB) for the profile's ``size`` label.

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy labels look like 'Linode 2048' (RAM in MB); strip the prefix so
    # the numeric comparison below can be made.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): for current-format labels such as 'Linode 2GB' this
    # int() call would raise ValueError ('2GB' is not numeric) — presumably
    # only legacy numeric labels reach this point; confirm against callers.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List Linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only VM names and
    their state are returned — the minimum information needed to check for
    existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Delegates filtering to the generic cloud helper, using the
    # 'query.selection' fields configured in __opts__.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Bug fix: the message previously referred to 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node['LINODEID'],
        'image': node['DISTRIBUTIONVENDOR'],
        'name': node['LABEL'],
        'size': node['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the message previously referred to 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive the other intervals from the API's hourly and monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 1:
        # Already running; nothing to do.
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    return {'action': 'start',
            'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 2:
        # Consistency fix: include the 'action' key, matching start() and
        # the other return paths of this function.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    return {'action': 'stop',
            'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Update a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Bug fix: calling with the documented default (update_args=None)
    # previously raised AttributeError on None.update(). Also copy the
    # caller's dict so their argument is not mutated.
    args = dict(update_args) if update_args else {}
    args['LinodeID'] = linode_id
    result = _query('linode', 'update', args=args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper to format and parse Linode listing data keyed by label.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        node_ips = all_ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            # Attach the raw API record when a full listing is requested.
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API and return the decoded response.

    action
        API subject area (e.g. ``linode`` or ``avail``); combined with
        ``command`` into the ``api_action`` query parameter.

    command
        API command within the subject area (e.g. ``list``).

    args
        Extra query parameters; the API key and api_action are injected
        here when not already supplied.

    method
        HTTP method to use. Non-POST requests ask for JSON responses.

    header_dict
        Extra HTTP headers to send.

    data
        Request body, if any.

    url
        Base URL of the Linode API.

    Raises ``SaltCloudSystemExit`` on authentication failure and
    ``SaltCloudException`` for any other API-reported error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials and the API action without clobbering any
    # caller-supplied values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude throttle: if another query already ran during this same second,
    # sleep for the configured 'ratelimit_sleep' before issuing this one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    # Surface API-level errors; an expired/invalid key aborts outright.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll until a Linode job reports host success, or the timeout elapses.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait, in seconds.

    quiet
        Log status updates at debug level when True, otherwise at info.
    '''
    interval = 5
    attempts = int(timeout / interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll until the Linode reaches the given status, or the timeout elapses.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The numeric status code to wait for (defaults to 'brand_new').

    timeout
        The amount of time to wait, in seconds.

    quiet
        Log status updates at debug level when True, otherwise at info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    waiting_for = _get_status_descr_by_id(status)
    interval = 5

    for _ in range(int(timeout / interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True

        current = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current, waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # No numeric code matched; fall back to a direct key lookup, which
    # yields None for unknown codes (the table is keyed by name).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name,
    or None when the name is unknown.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters:
    3-48 characters, ASCII letters/digits/dashes/underscores, beginning
    and ending with a letter or digit.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(regex, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to connect over: 'public_ips' (default)
    or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create_disk_from_distro
|
python
|
def create_disk_from_distro(vm_, linode_id, swap_size=None):
r'''
Creates the disk for the Linode from the distribution.
vm\_
The VM profile to create the disk for.
linode_id
The ID of the Linode to create the distribution disk for. Required.
swap_size
The size of the disk, in MB.
'''
kwargs = {}
if swap_size is None:
swap_size = get_swap_size(vm_)
pub_key = get_pub_key(vm_)
root_password = get_password(vm_)
if pub_key:
kwargs.update({'rootSSHKey': pub_key})
if root_password:
kwargs.update({'rootPass': root_password})
else:
raise SaltCloudConfigError(
'The Linode driver requires a password.'
)
kwargs.update({'LinodeID': linode_id,
'DistributionID': get_distribution_id(vm_),
'Label': vm_['name'],
'Size': get_disk_size(vm_, swap_size, linode_id)})
result = _query('linode', 'disk.createfromdistribution', args=kwargs)
return _clean_data(result)
|
r'''
Creates the disk for the Linode from the distribution.
vm\_
The VM profile to create the disk for.
linode_id
The ID of the Linode to create the distribution disk for. Required.
swap_size
The size of the disk, in MB.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L606-L644
|
[
"def get_password(vm_):\n r'''\n Return the password to use for a VM.\n\n vm\\_\n The configuration to obtain the password from.\n '''\n return config.get_cloud_config_value(\n 'password', vm_, __opts__,\n default=config.get_cloud_config_value(\n 'passwd', vm_, __opts__,\n search_global=False\n ),\n search_global=False\n )\n",
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n",
"def get_swap_size(vm_):\n r'''\n Returns the amoutn of swap space to be used in MB.\n\n vm\\_\n The VM profile to obtain the swap size from.\n '''\n return config.get_cloud_config_value(\n 'swap', vm_, __opts__, default=128\n )\n",
"def get_pub_key(vm_):\n r'''\n Return the SSH pubkey.\n\n vm\\_\n The configuration to obtain the public key from.\n '''\n return config.get_cloud_config_value(\n 'ssh_pubkey', vm_, __opts__, search_global=False\n )\n",
"def get_distribution_id(vm_):\n r'''\n Returns the distribution ID for a VM\n\n vm\\_\n The VM to get the distribution ID for\n '''\n distributions = _query('avail', 'distributions')['DATA']\n vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)\n\n distro_id = ''\n\n for distro in distributions:\n if vm_image_name == distro['LABEL']:\n distro_id = distro['DISTRIBUTIONID']\n return distro_id\n\n if not distro_id:\n raise SaltCloudNotFound(\n 'The DistributionID for the \\'{0}\\' profile could not be found.\\n'\n 'The \\'{1}\\' instance could not be provisioned. The following distributions '\n 'are available:\\n{2}'.format(\n vm_image_name,\n vm_['name'],\n pprint.pprint(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))\n )\n )\n",
"def get_disk_size(vm_, swap, linode_id):\n r'''\n Returns the size of of the root disk in MB.\n\n vm\\_\n The VM to get the disk size for.\n '''\n disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']\n return config.get_cloud_config_value(\n 'disk_size', vm_, __opts__, default=disk_size - swap\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
# (module-level throttle state, read and updated by _query()).
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to its Linode API numeric status 'code' and a
# human-readable 'descr'. Looked up by name in _get_status_id_by_name() and
# scanned by code in _get_status_descr_by_id().
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): key is misspelled ('beeing'); it is a runtime lookup key,
    # so renaming it could break external callers — confirm before fixing.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
# Name under which this driver is registered with salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when the Linode provider configuration is present.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # Both 'apikey' and 'password' must be set for this driver to load.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: the original message referenced avail_locations by mistake.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, skips the check that the VM is
        already running before issuing the linode.boot API call. Setting
        ``check_running`` to False is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10

    Returns True when the boot job succeeds, False otherwise.
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # linode_item is only used in log/error messages below; prefer the name
    # when we have one.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the boot job completes (or times out).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    params = {
        'LinodeID': kwargs.get('linode_id', None),
        'DatacenterID': kwargs.get('datacenter_id', None),
        'PlanID': kwargs.get('plan_id', None),
    }
    # All three IDs are mandatory for the linode.clone API call.
    if any(value is None for value in params.values()):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args=params)
def create(vm_):
    r'''
    Create a single Linode VM.

    vm\_
        The VM profile dictionary describing the machine to create. Either
        provisions a fresh Linode from a distribution image, or clones an
        existing Linode when ``clonefrom`` is set in the profile.

    Returns the bootstrap result dict (augmented with node data) on success,
    or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the name violates Linode's label rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: derive missing size/location from the source Linode.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # The API can also report failures inside a successful HTTP response.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the new node to report 'Brand New' before configuring it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already has a config profile; reuse its ID.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode (check_running=False: the fresh node is not running yet)
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config. Optional.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile. Optional.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )
    # DiskList is a comma-separated string of disk IDs, root first.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the given Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap setting.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Request a new private IP address for the given Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate',
                      args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True asks the API to delete without its usual safety checks
    # (NOTE(review): presumably this allows deletion with disks still
    # attached — confirm against the Linode API docs).
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the cached minion entry so stale records don't linger.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Return the first config ID for a given Linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # An unknown location raises KeyError; callers such as create() catch it
    # to fall back to a default datacenter.
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Amount of disk (MB) reserved for swap; subtracted from the Linode's
        total disk to form the default root disk size.

    linode_id
        The ID of the Linode whose total disk size is queried.
    '''
    # Profile 'disk_size' wins; otherwise root = total disk minus swap.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever disk remains after the
    root disk and swap have been accounted for.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Root size defaults to (total - swap) unless 'disk_size' is configured.
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # Fixed: pprint.pprint() prints and returns None, so the old message
    # rendered 'available:\nNone'. pprint.pformat() returns the string.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return the public and private IP addresses of Linodes.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, a single ``{'public_ips': [...], 'private_ips': [...]}`` dict
        is returned instead of a mapping keyed by Linode ID.
    '''
    if linode_id:
        ip_records = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_records = _query('linode', 'ip.list')['DATA']

    ret = {}
    for record in ip_records:
        node_id = six.text_type(record['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if record['ISPUBLIC'] == 1:
            bucket['public_ips'].append(record['IPADDRESS'])
        else:
            bucket['private_ips'].append(record['IPADDRESS'])

    if linode_id:
        # Collapse the mapping down to just the requested Linode's lists.
        single = ret.get(six.text_type(linode_id))
        if single:
            return {'public_ips': single['public_ips'],
                    'private_ips': single['private_ips']}
        return {'public_ips': [], 'private_ips': []}

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the API record for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``; note this induces an additional API call compared to
        using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    kwargs = {} if kwargs is None else kwargs

    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Look up the Linode ID belonging to the given label.

    name
        The name (label) of the Linode whose ID is wanted. Required.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' wins; fall back to the legacy 'passwd' option.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label into the format in
    Linode API output, returning the matching plan ID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Raises ``SaltCloudException`` when the label cannot be matched to any
    available plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Label is already in the new format but matches no plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # Fixed: int() raises ValueError (not TypeError) for a
                # non-numeric string, so the old handler never fired.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # Fixed: use floor division so the size renders as e.g. '2GB'
            # instead of '2.0GB' under Python 3 true division.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed: the original message referenced show_instance by mistake.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate the user-supplied label into the canonical plan ID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested via the profile's
    'assign_private_ip' option (defaults to False).
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested via the profile's
    'allocate_data_disk' option (defaults to False).

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey configured via 'ssh_pubkey'.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB (profile option 'swap',
    default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (plan RAM, in MB).

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Accept legacy 'Linode <MB>' labels: strip the prefix and check the
    # remaining number against the plan's RAM.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared listing helper; full=False keeps it brief.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # full=True attaches the raw API record under the 'extra' key.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names
    and their state is returned. This is the minimum amount of information
    needed to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    # Only id and state are needed here, keyed by the node's label.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection is delegated to the generic salt-cloud helper, driven
    # by the 'query.selection' configuration value.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name

    Returns the cleaned API response on success, False when the reboot job
    does not complete in time.
    '''
    if call != 'action':
        # Fixed: the original message referenced show_instance by mistake.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job finishes (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    details = get_linode(kwargs={'linode_id': linode_id})
    addresses = get_ips(linode_id)

    return {
        'id': details['LINODEID'],
        'image': details['DISTRIBUTIONVENDOR'],
        'name': details['LABEL'],
        'size': details['TOTALRAM'],
        'state': _get_status_descr_by_id(int(details['STATUS'])),
        'private_ips': addresses['private_ips'],
        'public_ips': addresses['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed: the original message referenced the show_instance "action"
        # while requiring -f/--function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly estimates from the hourly and monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 1 == Running (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block on the boot job before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 2 == Powered Off (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 2:
        # Include 'action' for consistency with start()'s return shape.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Block on the shutdown job before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Fixed: the default of None crashed with AttributeError when no
    # update_args were supplied (None has no .update()).
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Fetch all Linodes and shape them into salt-cloud node dictionaries,
    keyed by label. When ``full`` is True the raw API record is attached
    under the 'extra' key.
    '''
    all_ips = get_ips()
    ret = {}

    for node in _query('linode', 'list')['DATA']:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach the node's addresses when the IP listing knows about it.
        if linode_id in all_ips:
            entry['private_ips'] = all_ips[linode_id]['private_ips']
            entry['public_ips'] = all_ips[linode_id]['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the API's 'api_action' parameter as 'action.command'.

    args
        Query parameters; the API key and api_action are injected here.

    method
        HTTP method; DELETE responses are not JSON-decoded.

    Raises SaltCloudSystemExit on authentication failure and
    SaltCloudException for any other error the API reports.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    # Inject credentials/action only when not already supplied by the caller.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Throttle: if the previous call happened within the current second,
    # sleep for the configured ratelimit before querying again.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # The API reports failures in ERRORARRAY within an otherwise-OK response.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the job reports HOST_SUCCESS, False on timeout.
    '''
    # Poll every 5 seconds until the timeout budget is exhausted.
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # NOTE(review): only the first entry of job.list is examined —
        # presumably the listing is newest-first; confirm with the API docs.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the Linode reports the requested status, False on
    timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    # Human-readable form of the target status, for log messages.
    status_desc_waiting = _get_status_descr_by_id(status)
    # Poll every 5 seconds until the timeout budget is exhausted.
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.
    status_id
        linode VM status ID
    Returns None when the code is unknown.
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # LINODE_STATUS is keyed by status *names*, so the previous fallback of
    # LINODE_STATUS.get(status_id) could never match a numeric ID; make the
    # "unknown code" result an explicit None instead.
    return None
def _get_status_id_by_name(status_name):
    '''
    Look up the numeric Linode status code for an internal status name.
    status_name
        internal linode VM status name
    Returns None for unknown names.
    '''
    status_entry = LINODE_STATUS.get(status_name, {})
    return status_entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.
    .. versionadded:: 2015.5.6
    name
        The VM name to validate
    Returns True when the label is acceptable, False otherwise.
    '''
    name = six.text_type(name)
    # Labels: ASCII alphanumerics, dashes and underscores; must start and
    # end alphanumeric; length 3-48.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Read the per-VM 'ssh_interface' option; search_global=False keeps the
    # lookup scoped to this profile/provider rather than the global config.
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create_swap_disk
|
python
|
def create_swap_disk(vm_, linode_id, swap_size=None):
r'''
Creates the disk for the specified Linode.
vm\_
The VM profile to create the swap disk for.
linode_id
The ID of the Linode to create the swap disk for.
swap_size
The size of the disk, in MB.
'''
kwargs = {}
if not swap_size:
swap_size = get_swap_size(vm_)
kwargs.update({'LinodeID': linode_id,
'Label': vm_['name'],
'Type': 'swap',
'Size': swap_size
})
result = _query('linode', 'disk.create', args=kwargs)
return _clean_data(result)
|
r'''
Creates the disk for the specified Linode.
vm\_
The VM profile to create the swap disk for.
linode_id
The ID of the Linode to create the swap disk for.
swap_size
The size of the disk, in MB.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L647-L673
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n",
"def get_swap_size(vm_):\n r'''\n Returns the amoutn of swap space to be used in MB.\n\n vm\\_\n The VM profile to obtain the swap size from.\n '''\n return config.get_cloud_config_value(\n 'swap', vm_, __opts__, default=128\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only advertise the driver when a Linode provider is configured.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # Both 'apikey' and 'password' must be present for this provider to be
    # considered configured; is_provider_configured returns False otherwise.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    distros = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its human-readable label.
    return {item['LABEL']: item for item in distros}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its location label.
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed copy/paste: the message previously named avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        # Key each plan by its label, e.g. 'Linode 2GB'.
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.
    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.
    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.
    config_id
        The ID of the Config to boot. Required.
    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.
    Can be called as an action (which requires a name):
    .. code-block:: bash
        salt-cloud -a boot my-instance config_id=10
    ...or as a function (which requires either a name or linode_id):
    .. code-block:: bash
        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # Prefer linode_id when both are given; resolving a name costs an
    # extra API round trip.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): compares against the *string* '1'; other callers cast
        # STATUS to int. Assumes the API returns a string here — confirm.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the boot job completes (or times out).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.
    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.
    Orchestrates the full provisioning flow: validate the profile and name,
    fire salt-cloud lifecycle events, create (or clone) the Linode, attach
    disks and a config profile, boot it, and bootstrap a Salt minion over SSH.
    Returns the bootstrap result dict (augmented with node data) on success,
    or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail early if the requested label violates Linode's naming rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    # A 'clonefrom' profile entry switches the flow to cloning an existing
    # Linode instead of creating one from a distribution image.
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        # Inherit size/plan/datacenter from the clone source when not set.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the brand-new status; on timeout only log, then continue.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.
    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.
        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )
    # Build the comma-separated DiskList; the data disk is optional.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': ','.join('{0}'.format(disk_id) for disk_id in disk_ids)
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.
    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The size of the swap disk, in MB, used to compute the root disk size.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory for disk.createfromdistribution.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    # Root disk size defaults to total disk minus swap (see get_disk_size).
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)
    .. versionadded:: 2016.3.0
    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    # The data disk is labeled '<vmname>_data' and always formatted ext4.
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name']+"_data",
        'Type': 'ext4',
        'Size': data_size
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.
    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.
    name
        The name of VM to be be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True is sent to the API — presumably bypasses Linode's
    # pre-delete safety checks; confirm against the Linode API docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache if caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.
    location
        The location, or name, of the datacenter to get the ID from.
    Raises KeyError when the location is not a known datacenter label.
    '''
    # avail_locations() keys its results by the datacenter's LOCATION label.
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.
    vm\_
        The VM to get the disk size for.
    swap
        Swap disk size in MB, subtracted from the default.
    linode_id
        The Linode whose total disk space (TOTALHD) provides the default.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to all remaining space after swap unless 'disk_size' is set.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB
    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    # Whatever is left after the root and swap disks goes to the data disk.
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.
    vm\_
        The VM to get the distribution ID for
    Raises SaltCloudNotFound when the profile's image does not match any
    available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            # pprint.pformat returns the formatted string; the previous
            # pprint.pprint printed to stdout and returned None, so the
            # message rendered the literal 'None' instead of the list.
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.
    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    With a ``linode_id``: returns ``{'public_ips': [...], 'private_ips': [...]}``.
    Without one: returns that shape nested under each Linode ID (as a string).
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')
    ips = ips['DATA']
    ret = {}
    # Group every address under its owning Linode, split by ISPUBLIC flag.
    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])
    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']
        ret = _all_ips
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.
    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        # Resolving a name costs an extra API round trip.
        linode_id = get_linode_id_from_name(name)
    # linode.list returns a list; the ID filter yields a single entry.
    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.
    name
        The name of the Linode from which to get the Linode ID. Required.
    Raises SaltCloudNotFound when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.
    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' is preferred; the legacy 'passwd' option is the fallback.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output
    label
        The label, or name, of the plan to decode.
    Example:
        `Linode 2048` will decode to `Linode 2GB`
    Returns the PLANID for the (possibly translated) label; raises
    SaltCloudException when no matching plan exists.
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError on a non-numeric string (the common
                # failure here); the previous TypeError-only clause let that
                # propagate instead of falling back to 0.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            # The old 'Linode 1024' plan became the 'Nanode 1GB' plan.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.
    label
        The label, or name, of the plan to get the ID from.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy/paste: the message previously named show_instance.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # Translate legacy labels (e.g. 'Linode 2048') to the current format
    # and resolve to the numeric PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # Controlled by the 'assign_private_ip' profile option; defaults to False.
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested
    .. versionadded:: 2016.3.0
    '''
    # Controlled by the 'allocate_data_disk' profile option; defaults to False.
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.
    vm\_
        The configuration to obtain the public key from.
    '''
    # Read 'ssh_pubkey' from this VM's profile/provider config only.
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.
    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not set 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.
    vm\_
        The VM to get the size for.
    Raises SaltCloudNotFound when the configured size's RAM does not match
    the numeric portion of its label.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Labels like 'Linode 1024' encode the RAM in MB; strip the prefix and
    # cross-check against the plan's reported RAM.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.
    CLI Example:
    .. code-block:: bash
        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared listing helper; see list_nodes_full for detail.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.
    CLI Example:
    .. code-block:: bash
        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared listing helper, requesting full detail.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    result = {}
    # Only the id and a human-readable state are kept per node.
    for node in _query('linode', 'list')['DATA']:
        result[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
    return result
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection comes from the 'query.selection' option; the shared
    # cloud utility filters the full listing down to those fields.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.
    .. versionadded:: 2015.8.0
    name
        The name of the VM to reboot.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot vm_name
    Returns the API response dict on success, False when the reboot job
    does not complete within the wait window.
    '''
    if call != 'action':
        # Fixed copy/paste: the message previously named show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    details = get_linode(kwargs={'linode_id': linode_id})
    addresses = get_ips(linode_id)

    # Summarize the raw API record into salt-cloud's standard fields.
    return {
        'id': details['LINODEID'],
        'image': details['DISTRIBUTIONVENDOR'],
        'name': details['LABEL'],
        'size': details['TOTALRAM'],
        'state': _get_status_descr_by_id(int(details['STATUS'])),
        'private_ips': addresses['private_ips'],
        'public_ips': addresses['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed: the original message incorrectly referred to the
        # 'show_instance' action instead of the show_pricing function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the hourly and monthly API rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': linode_id})

    # STATUS 1 means the Linode is already running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    boot_response = _query('linode', 'boot', args={'LinodeID': linode_id})['DATA']
    if not _wait_for_job(linode_id, boot_response['JobID']):
        return {'action': 'start',
                'success': False}

    return {'state': 'Running',
            'action': 'start',
            'success': True}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 means the Linode is already powered off; nothing to do.
    if node['STATUS'] == 2:
        return {'success': True,
                # 'action' key added for consistency with the start() return
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
        Defaults to an empty update.
    '''
    # Guard the default: the original called update_args.update(...) even
    # when update_args was None, raising AttributeError before the API was
    # ever contacted.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    full
        When True, also attach the raw API record for each node under an
        ``extra`` key in addition to the summary fields.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])

        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # get_ips() keys its result by linode ID (as text); attach the
        # matching address lists to this node.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject area, e.g. ``linode`` or ``avail``.

    command
        The command within the subject area, e.g. ``list``; combined with
        ``action`` to form the ``api_action`` request parameter.

    args
        Extra query parameters; the API key and ``api_action`` are merged in.

    method
        HTTP method for the request.

    header_dict
        Additional HTTP headers to send.

    data
        Request body, if any.

    url
        The API endpoint to query.
    '''
    global LASTCALL

    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    # DELETE responses are not JSON-decoded.
    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if another query already happened within the same
    # second (tracked in the module-level LASTCALL), sleep for the configured
    # ratelimit_sleep interval before issuing this request.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of the logs
        opts=__opts__,
    )

    # Surface any API-reported errors; an invalid key aborts immediately.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)
    log_level = logging.DEBUG if quiet else logging.INFO

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        newest = jobs[0]

        # The newest job is first; succeed once it matches and reports success.
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            log_level,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    # Timed out without observing the requested status.
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    wanted = int(status_id)
    for status_data in LINODE_STATUS.values():
        if status_data['code'] == wanted:
            return status_data['descr']

    # No status code matched; preserve the original fallback lookup.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    label = six.text_type(name)

    # Labels must be 3-48 chars of ASCII letters, digits, dashes, and
    # underscores, and must begin and end with a letter or digit.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.

    vm_
        The VM/profile dictionary to read the ``ssh_interface`` setting from.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create_data_disk
|
python
|
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    kwargs = {}

    # Name the disk after the VM with a "_data" suffix; type is fixed to ext4.
    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name']+"_data",
                   'Type': 'ext4',
                   'Size': data_size
                   })

    result = _query('linode', 'disk.create', args=kwargs)
    return _clean_data(result)
|
r'''
Create a data disk for the linode (type is hardcoded to ext4 at the moment)
.. versionadded:: 2016.3.0
vm\_
The VM profile to create the data disk for.
linode_id
The ID of the Linode to create the data disk for.
data_size
The size of the disk, in MB.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L676-L701
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made; read and updated by _query()
# to rate-limit successive API calls.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric code returned by the API and a
# human-readable description.
# NOTE(review): 'beeing_created' is misspelled, but it is a lookup key used
# via _get_status_id_by_name() and must not be renamed here.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# The name under which this driver is loaded by salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.

    Only load this module when a Linode provider is configured.
    '''
    provider = get_configured_provider()
    if provider is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Verifies that the required ``apikey`` and ``password`` values are present
    in this driver's provider configuration.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    # Key each distribution record by its label.
    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    # Key each datacenter record by its location string.
    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: the original message incorrectly referred to 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    # Key each plan record by its label.
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for human-readable log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared as the string '1' here, while
        # start()/stop() compare it as the integer 1 — confirm which type the
        # API actually returns before relying on this guard.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three IDs are mandatory; bail out if any is missing.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dictionary, as assembled by salt-cloud from the
        profile and provider configuration. Mutated in place: ``ssh_host``,
        ``password``, ``public_ips``, and ``private_ips`` are set here for
        the bootstrap step.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) from the profile, if one was given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: size/plan/datacenter default to the source node's values.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # Abort on the first API-reported error.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for the node to reach 'brand_new'; a timeout is logged but does
    # not abort the creation.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned node already has a configuration profile to boot from.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )

    # DiskList is an ordered, comma-separated list of disk IDs; the data disk
    # is appended only when one was provided.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }

    result = _query('linode', 'config.create', args=config_args)

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    kwargs = {}

    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory for disk.createfromdistribution.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    # Root disk gets whatever space is not reserved for swap.
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks tells Linode to delete the node even if it still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion from the salt-cloud cache, if caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first (and typically only) configuration profile is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to everything that is left after reserving the swap space.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is not used by the root disk or swap goes to the data disk.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # Fixed: pprint.pprint() prints to stdout and returns None,
                # which rendered this part of the error message as 'None';
                # pprint.pformat() returns the formatted string.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses.

    linode_id
        When provided, limit the result to that Linode's addresses and
        return them as a flat dict instead of one keyed by Linode ID.
    '''
    if linode_id:
        ip_data = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ip_data = _query('linode', 'ip.list')

    ret = {}
    for entry in ip_data['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        node_ips = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_ips[bucket].append(entry['IPADDRESS'])

    if linode_id:
        # Flatten the result down to just this Linode's addresses rather
        # than a dict keyed by Linode ID.
        flat = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flat['private_ips'] = match['private_ips']
            flat['public_ips'] = match['public_ips']
        return flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``, at the cost of one extra API call.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the VM with the given label.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # Fall back to the legacy 'passwd' option when 'password' is not set.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<type> <n>GB" format but still unknown;
            # there is nothing further to decode.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                # int() raises ValueError (not TypeError) for a non-numeric
                # string; catching ValueError makes the debug fallback
                # actually reachable.
                plan_size = int(plan[1])
            except ValueError:
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                # The legacy 1024 MB "Linode" plan is now sold as "Nanode".
                plan_type = 'Nanode'

            # Floor division keeps the MB -> GB conversion an int on
            # Python 3; true division would build labels like 'Linode 2.0GB'
            # that never match the API's size labels.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Name the function actually being misused (was 'show_instance').
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private IP address was requested for the VM.
    '''
    requested = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return requested
def get_data_disk(vm_):
    '''
    Return True if an extra data disk was requested for the VM.

    .. versionadded:: 2016.3.0
    '''
    wants_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_disk
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    # Resolve the profile's 'size' label to the plan's RAM value (MB) as
    # reported by the Linode API.
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Legacy labels embed the RAM amount after the 'Linode ' prefix, e.g.
    # 'Linode 1024'; strip the prefix so it can be compared numerically.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    # NOTE(review): a new-style label such as 'Linode 2GB' would make int()
    # raise ValueError here rather than SaltCloudNotFound -- confirm callers
    # only reach this with legacy MB-style labels.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a brief listing of the available Linodes.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name, due to a limitation
        of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List Linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name, due to a limitation
        of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return the minimum information about the VMs on the provider: names,
    IDs, and states. This is enough to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return the VMs on the provider, restricted to the fields chosen by the
    ``query.selection`` option.
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](nodes, selection, call)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Name the action actually being misused (was 'show_instance').
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only shows the distribution vendor (for example
        "Debian" or "RHEL"), not the actual image name, due to a limitation
        of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Name the function actually being misused (was 'show_instance').
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 1:
        # Already up; nothing to do.
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    boot_job = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, boot_job['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}

    return {'action': 'start',
            'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 2:
        # Include the 'action' key for parity with the other return paths
        # and with start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the default: calling .update() on None would raise
    # AttributeError before the API was ever contacted.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper to shape raw Linode API node data into salt-cloud's layout.

    full
        When True, attach the raw API record under the 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        node_ips = all_ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API area to target (e.g. 'linode' or 'avail'); combined with
        ``command`` into the ``api_action`` request parameter.

    command
        The API command within ``action`` (e.g. 'list').

    args
        Query parameters to send; the API key and api_action are injected
        here when not already present.

    method
        The HTTP method to use.

    header_dict
        Extra HTTP headers to send.

    data
        The request body, if any.

    url
        The Linode API endpoint.

    Returns the decoded JSON response as a dictionary. Raises
    SaltCloudSystemExit on authentication failure and SaltCloudException for
    any other error the API reports.
    '''
    # LASTCALL records the epoch second of the previous request so that
    # back-to-back calls can be throttled via 'ratelimit_sleep'.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        # DELETE responses carry no JSON body worth decoding.
        decode = False
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        # Another request was made within the same second; honor the
        # configured rate limit before issuing this one.
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success, or the timeout elapses.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        Maximum time, in seconds, to keep polling.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)
    log_level = logging.DEBUG if quiet else logging.INFO

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(log_level, 'Still waiting on Job %s for Linode %s.',
                job_id, linode_id)

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the node reaches the given status, or the timeout
    elapses.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to the 'brand_new' code.

    timeout
        Maximum time, in seconds, to keep polling.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5
    log_level = logging.DEBUG if quiet else logging.INFO

    for _ in range(int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            log_level,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID
    '''
    wanted_code = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == wanted_code:
            return status_data['descr']

    # No known code matched; fall back to a direct key lookup. LINODE_STATUS
    # is keyed by name, so for numeric IDs this effectively yields None.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric status code for an internal Linode status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    label = six.text_type(name)
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to SSH to: 'public_ips' (the default) or
    'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
create_private_ip
|
python
|
def create_private_ip(linode_id):
r'''
Creates a private IP for the specified Linode.
linode_id
The ID of the Linode to create the IP address for.
'''
kwargs = {'LinodeID': linode_id}
result = _query('linode', 'ip.addprivate', args=kwargs)
return _clean_data(result)
|
r'''
Creates a private IP for the specified Linode.
linode_id
The ID of the Linode to create the IP address for.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L704-L714
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made; updated by _query() to
# throttle back-to-back API requests.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric code returned in the API's
# STATUS field, plus a human-readable description.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): the key is misspelled ('beeing_created') but is kept
    # as-is in case any external code looks the status up by name.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when Linode provider configuration is present.
    '''
    provider = get_configured_provider()
    if provider is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    active = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(
        __opts__,
        active,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Name the function actually being misused (was 'avail_locations').
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # STATUS is reported as an integer (start()/stop() compare against
        # the ints 1/2); the previous comparison with the string '1' could
        # never match, so already-running Linodes slipped past this check.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    if None in (linode_id, datacenter_id, plan_id):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    r'''
    Create a single Linode VM.

    vm\_
        The VM/profile dictionary assembled by salt-cloud; must contain at
        least 'name', and normally 'size', 'image', and 'location', or
        'clonefrom' when cloning an existing Linode.

    Returns the bootstrap result dict (augmented with node data) on
    success, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) and datacenter from the profile, when given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Cloning path: inherit size/plan/datacenter from the clone source
        # when the profile did not specify them, then clone via the API.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    if 'ERRORARRAY' in result:
        # Bail out on the first API-reported error.
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Create a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')
    root_disk_id = kwargs.get('root_disk_id')
    swap_disk_id = kwargs.get('swap_disk_id')
    data_disk_id = kwargs.get('data_disk_id')
    kernel_id = kwargs.get('kernel_id')

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if None in (name, linode_id, root_disk_id, swap_disk_id):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The API expects a comma-separated list of disk IDs; the data disk is
    # optional and appended only when supplied.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk) for disk in disk_ids)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist}

    return _clean_data(_query('linode', 'config.create', args=config_args))
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for the Linode from the distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
        'rootPass': root_password,
    }

    # An SSH public key is optional; only send it when configured.
    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    return _clean_data(_query('linode', 'disk.createfromdistribution', args=disk_args))
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Falls back to the profile's configured
        swap size when not given.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        # Suffix distinguishes the data disk label from the root disk label.
        'Label': vm_['name'] + '_data',
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def destroy(name, call=None):
    '''
    Destroy a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Fire the "destroying" event before the API call so listeners are
    # notified even if the delete request below fails.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks asks the API to delete even when the Linode still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if __opts__.get('update_cachedir', False) is True:
        # Remove the minion from the salt-cloud cache dir so cached queries
        # no longer report the deleted VM.
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``. Note this induces an extra API call to
        resolve the name to an ID.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile is returned; Linodes created by
    # this driver have a single config.
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a location.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The swap disk size in MB, subtracted from the Linode's total disk
        space to compute the default root disk size.

    linode_id
        The ID of the Linode whose total disk space is queried.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever disk space remains
    after the root disk and swap are allocated.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # BUG FIX: pprint.pprint() prints to stdout and returns None,
                # which rendered the literal string 'None' in this message;
                # pprint.pformat() returns the formatted string.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses, grouped by Linode ID.

    linode_id
        Limits the IP addresses returned to the specified Linode ID, and
        collapses the result to a single
        ``{'public_ips': [...], 'private_ips': [...]}`` mapping.
    '''
    query_args = {'LinodeID': linode_id} if linode_id else None
    response = _query('linode', 'ip.list', args=query_args)

    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if entry['ISPUBLIC'] == 1:
            bucket['public_ips'].append(entry['IPADDRESS'])
        else:
            bucket['private_ips'].append(entry['IPADDRESS'])

    if linode_id:
        # Collapse the per-node mapping down to just this node's addresses.
        flat = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flat['private_ips'] = node_ips['private_ips']
            flat['public_ips'] = node_ips['public_ips']
        ret = flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises ``SaltCloudNotFound`` when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    Checks the ``password`` option first, falling back to the legacy
    ``passwd`` option.

    vm\_
        The configuration to obtain the password from.
    '''
    legacy_password = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_password,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format in
    Linode API output, and return the matching plan ID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Raises ``SaltCloudException`` when the label cannot be matched to an
    available plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Label is already in the new '<Type> <N>GB' format but does not
            # match any available plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUG FIX: int() raises ValueError on a non-numeric string;
                # the previous 'except TypeError' let that propagate uncaught.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # The 1024 MB plan was rebranded 'Nanode 1GB'.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUG FIX: this message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # Translate legacy labels (e.g. 'Linode 2048') and resolve the plan ID.
    label = _decode_linode_plan_label(label)
    return label
def get_private_ip(vm_):
    '''
    Return True if a private IP address is requested for this VM.
    '''
    assign_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return assign_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested for this VM.

    .. versionadded:: 2016.3.0
    '''
    allocate_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return allocate_data_disk
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key, if any.

    vm\_
        The configuration to obtain the public key from.
    '''
    ssh_pubkey = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return ssh_pubkey
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (its RAM, in MB).

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Legacy labels look like 'Linode 1024' where the number is the RAM in
    # MB; strip the prefix so the remainder can be compared numerically.
    # NOTE(review): newer labels like 'Linode 2GB' would make int() below
    # raise ValueError -- presumably such labels are normalized elsewhere
    # (see _decode_linode_plan_label); confirm against callers.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL" and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes()
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL" and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names
    and their state is returned. This is the minimum amount of information
    needed to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    (the fields are chosen via the ``query.selection`` option).
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode. Blocks until the reboot job completes.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUG FIX: this message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Wait for the reboot job to finish; report failure on timeout.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL" and does not display
        the actual image name. This is due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': linode_id})
    node_ips = get_ips(linode_id)

    return {
        'id': node['LINODEID'],
        'image': node['DISTRIBUTIONVENDOR'],
        'name': node['LABEL'],
        'size': node['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node['STATUS'])),
        'private_ips': node_ips['private_ips'],
        'public_ips': node_ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUG FIX: this message previously referenced 'show_instance'
        # (copy/paste error).
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly figures from the API's hourly and
    # monthly price points.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 1:
        # Already running; nothing to do.
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    boot_job = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    if not _wait_for_job(node_id, boot_job['JobID']):
        return {'action': 'start',
                'success': False}

    return {'state': 'Running',
            'action': 'start',
            'success': True}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 2:
        # Already powered off; report success without issuing a shutdown.
        # CONSISTENCY FIX: added the 'action' key, which start()'s
        # equivalent early return already includes.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUG FIX: guard the None default -- calling without update_args
    # previously raised AttributeError on None.update().
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, include the complete raw API record under ``extra``.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        this_node = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
            # ROBUSTNESS FIX: default both keys to empty lists so nodes
            # without any assigned IPs still expose them (previously the
            # keys were simply missing).
            'private_ips': [],
            'public_ips': [],
        }

        # Direct dict lookup instead of scanning every entry of `ips`.
        node_ips = ips.get(linode_id)
        if node_ips is not None:
            this_node['private_ips'] = node_ips['private_ips']
            this_node['public_ips'] = node_ips['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API subject area (e.g. ``linode``, ``avail``); combined with
        ``command`` into the ``api_action`` query parameter.

    command
        API method within ``action`` (e.g. ``list``, ``boot``).

    args
        Dict of extra query parameters. The API key and ``api_action``
        are injected automatically when absent.

    method
        HTTP method; default ``GET``.

    header_dict
        Optional extra HTTP headers.

    data
        Optional request body.

    url
        API endpoint; defaults to the public Linode API.

    Raises ``SaltCloudSystemExit`` on an invalid/expired API key and
    ``SaltCloudException`` for any other API-reported error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Crude client-side rate limiting: if another query already ran within
    # this same second, pause for the configured ratelimit_sleep.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep credentials and the root password out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # The Linode API reports errors in-band via ERRORARRAY rather than
    # HTTP status codes; translate them into exceptions here.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    # Record when this call completed, for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return. Returns True when the job reports host
    success within the timeout, otherwise False.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # job.list returns newest-first; only the most recent entry is
        # checked against the awaited job ID.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode. Returns True when the status is
    reached within the timeout, otherwise False.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new'
        status code.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to
        info. (Docstring previously said "when False", contradicting the
        code and ``_wait_for_job``.)
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a numeric Linode VM status ID,
    or None when the ID is unknown.

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # DEAD-CODE FIX: the old fallback ``LINODE_STATUS.get(status_id, None)``
    # could never match -- the dict is keyed by status *names* (strings),
    # not numeric IDs -- so it always yielded None. Return None explicitly.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric status code for an internal Linode VM status name,
    or None when the name is unknown.

    status_name
        internal linode VM status name
    '''
    status_entry = LINODE_STATUS.get(status_name, {})
    return status_entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters:
    3-48 characters, ASCII letters/digits/dashes/underscores, beginning
    and ending with a letter or digit.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips'
    (default) or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_config_id
|
python
|
def get_config_id(kwargs=None, call=None):
'''
Returns a config_id for a given linode.
.. versionadded:: 2015.8.0
name
The name of the Linode for which to get the config_id. Can be used instead
of ``linode_id``.h
linode_id
The ID of the Linode for which to get the config_id. Can be used instead
of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_config_id my-linode-config name=my-linode
salt-cloud -f get_config_id my-linode-config linode_id=1234567
'''
if call == 'action':
raise SaltCloudException(
'The get_config_id function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
linode_id = kwargs.get('linode_id', None)
if name is None and linode_id is None:
raise SaltCloudSystemExit(
'The get_config_id function requires either a \'name\' or a \'linode_id\' '
'to be provided.'
)
if linode_id is None:
linode_id = get_linode_id_from_name(name)
response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
config_id = {'config_id': response[0]['ConfigID']}
return config_id
|
Returns a config_id for a given linode.
.. versionadded:: 2015.8.0
name
The name of the Linode for which to get the config_id. Can be used instead
of ``linode_id``.h
linode_id
The ID of the Linode for which to get the config_id. Can be used instead
of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_config_id my-linode-config name=my-linode
salt-cloud -f get_config_id my-linode-config linode_id=1234567
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L764-L806
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    configured = get_configured_provider()
    if configured is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance, or False when no provider with
    both 'apikey' and 'password' is configured.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by their label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUG FIX: the original error message referred to 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        # Key plans by their human-readable label, e.g. 'Linode 2GB'.
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10

    Returns True on successful boot, False if the boot job did not report
    success within the _wait_for_job timeout.
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # linode_item is only used for log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        # STATUS is compared as a string here; '1' corresponds to 'Running'
        # in LINODE_STATUS — presumably the API returns it as a string
        # (TODO confirm against Linode APIv3 docs).
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id')
    datacenter_id = kwargs.get('datacenter_id')
    plan_id = kwargs.get('plan_id')

    # All three parameters are mandatory for the API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The salt-cloud VM dictionary (profile, size, location, image or
        clonefrom, etc.). Fires salt-cloud events during the lifecycle,
        bootstraps the new node, and returns the bootstrap result merged
        with node data, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the name does not meet Linode label restrictions.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: derive size/plan/location from the source Linode
        # when they were not set explicitly in the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        # The API signals failure via a non-empty ERRORARRAY in the payload.
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Non-fatal: log but continue if the node never reaches 'Brand New'.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name')
    linode_id = kwargs.get('linode_id')
    root_disk_id = kwargs.get('root_disk_id')
    swap_disk_id = kwargs.get('swap_disk_id')
    data_disk_id = kwargs.get('data_disk_id')
    kernel_id = kwargs.get('kernel_id')

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if None in (name, linode_id, root_disk_id, swap_disk_id):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': ','.join('{0}'.format(disk_id) for disk_id in disk_ids),
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; used only to compute the root
        disk size (total minus swap). Fetched from the profile when None.

    Raises SaltCloudConfigError when no root password is configured.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    # The SSH key is optional; the root password is mandatory.
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; fetched from the profile when falsy.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True: presumably tells the API to delete even with disks
    # attached — TODO confirm against the Linode APIv3 linode.delete docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache dir if caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Swap size in MB, subtracted from the Linode's total disk to
        compute the default root disk size.

    linode_id
        The ID of the Linode whose total disk capacity is queried.
    '''
    # Default root disk = everything left after swap, unless the profile
    # explicitly sets 'disk_size'.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever capacity remains
    after the root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    total = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total - swap
    )
    return total - root - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's image label does not match
    any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    distro_id = ''
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id
    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # BUG FIX: pprint.pprint() prints to stdout and returns None,
                # so the original error message always ended with 'None'.
                # pprint.pformat() returns the formatted string instead.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID; in
        that case a flat ``{'public_ips': [...], 'private_ips': [...]}``
        dict is returned instead of a mapping keyed by Linode ID.
    '''
    args = {'LinodeID': linode_id} if linode_id else None
    ip_data = _query('linode', 'ip.list', args=args)['DATA']

    ret = {}
    for entry in ip_data:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        key = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[key].append(entry['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flat['private_ips'] = node_ips['private_ips']
            flat['public_ips'] = node_ips['public_ips']
        ret = flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id')
    if linode_id is None:
        name = kwargs.get('name')
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name (label) of the Linode from which to get the Linode ID.
        Required.

    Raises SaltCloudNotFound when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from. Falls back to the
        legacy ``passwd`` key when ``password`` is not set.
    '''
    legacy_passwd = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_passwd,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label; raises
    SaltCloudException when the label cannot be matched to a plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already new-style but still unknown: nothing to translate.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # BUG FIX: int() raises ValueError (not TypeError) for a
                # non-numeric string, so the original handler never fired.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # BUG FIX: use floor division so e.g. 2048 becomes 'Linode 2GB'
            # and not 'Linode 2.0GB' on Python 3 (true division).
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUG FIX: the original error message referred to 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels (e.g. 'Linode 1024') into the current
    # 'Linode 1GB' form and resolve to a PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM in MB).

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the profile size label's numeric part
    does not match the plan's RAM.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy 'Linode <MB>' labels encode the RAM amount after the prefix.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): for labels not starting with 'Linode', int(vm_size) will
    # raise ValueError on non-numeric labels — confirm this is intended.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: delegate with full=False.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing: delegate with full=True so the raw node data is attached.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    nodes = _query('linode', 'list')['DATA']
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUG FIX: the original error message referred to 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success, or the wait times out.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUG FIX: the original error message referred to 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive the other intervals from the hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == 'Running' (see LINODE_STATUS)
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == 'Powered Off' (see LINODE_STATUS)
    if node['STATUS'] == 2:
        # CONSISTENCY FIX: include the 'action' key here, matching the
        # equivalent "already in desired state" branch of start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # ROBUSTNESS FIX: the original raised AttributeError when update_args
    # was left at its default of None.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, attach the raw API node dict under the ``extra`` key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])

        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # PERF FIX: look up this node's IPs directly instead of scanning
        # the whole IP mapping once per node (was O(nodes * linodes)).
        # As before, nodes with no IP entry get no ip keys at all.
        node_ips = ips.get(linode_id)
        if node_ips is not None:
            this_node['private_ips'] = node_ips['private_ips']
            this_node['public_ips'] = node_ips['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the APIv3 ``api_action`` parameter, e.g.
        ``linode.boot`` from action='linode', command='boot'.

    args
        Query parameters; the provider's api_key is injected here.

    Raises SaltCloudSystemExit on an expired/invalid API key and
    SaltCloudException for any other API-reported errors. Returns the
    decoded JSON response dict.
    '''
    # LASTCALL is module-global so the rate-limit sleep applies across
    # successive calls within the same process.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Throttle: if the previous call happened within the current second,
    # sleep for the configured ratelimit before issuing the next one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    # api_key and rootPass are masked in salt's HTTP debug logging.
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Abort immediately on auth failure rather than
                    # aggregating it with other errors.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True when the job reports HOST_SUCCESS, False on timeout.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        latest = jobs[0]
        if latest['JOBID'] == job_id and latest['HOST_SUCCESS'] == 1:
            return True
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update (defaults to 'brand_new').

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.

    Returns True once the status matches, False on timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID

    Returns None when the code is not in LINODE_STATUS.
    '''
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # LINODE_STATUS is keyed by status *name* (strings), so the previous
    # fallback of LINODE_STATUS.get(status_id) could never match a numeric
    # id and always produced None; make that explicit.
    return None
def _get_status_id_by_name(status_name):
    '''
    Look up the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name

    Returns None when the name is not a known status.
    '''
    status_entry = LINODE_STATUS.get(status_name, {})
    return status_entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when the label is acceptable, False (with a warning logged)
    otherwise.
    '''
    label = six.text_type(name)
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Label must be 3-48 chars, begin/end alphanumeric, interior may add _ and -.
    valid = 3 <= len(label) <= 48 and pattern.match(label) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_disk_size
|
python
|
def get_disk_size(vm_, swap, linode_id):
r'''
Returns the size of the root disk in MB.
vm\_
The VM to get the disk size for.
'''
disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
return config.get_cloud_config_value(
'disk_size', vm_, __opts__, default=disk_size - swap
)
|
r'''
Returns the size of the root disk in MB.
vm\_
The VM to get the disk size for.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L820-L830
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider is configured.
    '''
    provider = get_configured_provider()
    if provider is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by their label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')

    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')

    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by their label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: this error previously named avail_locations, giving users
        # a misleading usage hint.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    # Action invocations identify the target by name; functions may pass it
    # through kwargs instead (validated below).
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for human-readable log/error output below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared against the *string* '1' here,
        # while create() casts node STATUS with int() -- confirm the API's
        # return type; an integer STATUS would make this guard never fire.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    # Block until the boot job completes (or times out after 300s).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory for the linode.clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile/configuration dict. Must contain ``name``; ``size``,
        ``location``, ``image`` and ``clonefrom`` are read when present.

    Returns the bootstrap result dict (augmented with node details) on
    success, or False when validation or the Linode API call fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Linode labels are restricted (3-48 chars of [A-Za-z0-9_-]); bail early.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the requested plan (size) to a Linode PlanID, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the requested location to a Linode DatacenterID, if given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # for any value the profile did not specify explicitly.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API may report errors inline rather than raising; surface them.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for the node to reach 'brand_new' before configuring it; a
    # timeout is only logged, not fatal.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Cloned nodes already carry a config profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The data disk is optional and, when present, goes last in the list.
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    else:
        disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for disk.createfromdistribution.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {'rootPass': root_password}
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    disk_args['LinodeID'] = linode_id
    disk_args['DistributionID'] = get_distribution_id(vm_)
    disk_args['Label'] = vm_['name']
    disk_args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name']+"_data",
        'Type': 'ext4',
        'Size': data_size
    }
    result = _query('linode', 'disk.create', args=disk_args)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Allocate a private IP address on the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate',
                    args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # NOTE(review): skipChecks is passed to the delete call; presumably it
    # bypasses the API's "Linode still has disks" safety guard -- confirm
    # against the linode.delete API documentation.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion's cache entry so stale node data is not reported.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the Linode's first config profile is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']

    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    # Data disk gets whatever remains of the Linode's total disk after the
    # (possibly profile-overridden) root disk and the swap allocation.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's image does not match any
    available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # pprint.pprint() prints to stdout and returns None, which
                # previously rendered the literal string 'None' in this
                # message; pformat() returns the formatted listing instead.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ip_response = _query('linode', 'ip.list')

    ret = {}

    for item in ip_response['DATA']:
        node_id = six.text_type(item['LINODEID'])
        bucket = 'public_ips' if item['ISPUBLIC'] == 1 else 'private_ips'
        node_entry = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_entry[bucket].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        entry = ret.get(six.text_type(linode_id))
        if entry:
            flat['private_ips'] = entry['private_ips']
            flat['public_ips'] = entry['public_ips']
        ret = flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        # Fall back to a name lookup, which costs an extra API call.
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is honored as a fallback key when 'password' is unset.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label; raises
    SaltCloudException when no matching plan exists.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings; the
                # previous code only caught TypeError, so a label such as
                # 'Linode foo' crashed here instead of being reported
                # through the SaltCloudException path below.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Use floor division: under Python 3, true division yields a
            # float (e.g. 'Linode 2.0GB'), which never matches the API's
            # 'Linode 2GB' labels.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed: this message previously referenced show_instance, which is
        # a different function; report the actual one.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels (e.g. 'Linode 2048') to the current API form.
    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__,
        default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__,
        default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__,
        search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not set 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the stripped size label does not match the
    plan's RAM value.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Accept legacy labels like 'Linode 1024' by dropping the prefix and
    # comparing the remaining number against the plan's RAM.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    # NOTE(review): int(vm_size) raises ValueError for new-style labels such
    # as 'Linode 2GB' -- confirm callers only pass legacy RAM-numbered labels.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegates to the shared listing helper with the brief field set.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegates to the shared listing helper with the full field set.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a minimal listing of the VMs on the provider: only each VM's name,
    ID, and state. This is the least information needed to check for existing
    VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return the VMs on the provider, restricted to the fields chosen via the
    ``query.selection`` option.
    '''
    selector = __utils__['cloud.list_nodes_select']
    return selector(list_nodes_full(), __opts__['query.selection'], call)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed: the old message misnamed this action as show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM, looked up by name.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed: the old message misnamed this function as show_instance.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive coarser/finer granularities from the API's hourly and monthly
    # figures; these are estimates only.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 means 'Running' (see LINODE_STATUS): nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    # 'linode.boot' starts the machine; wait for the resulting job to finish.
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 means 'Powered Off' (see LINODE_STATUS): nothing to do.
    # The 'action' key is included for consistency with start()'s
    # already-running branch (it was previously missing here).
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Update a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the None default: the old code called .update() on it directly,
    # which raised AttributeError when update_args was omitted. Copying also
    # avoids mutating the caller's dict.
    update_args = dict(update_args or {})
    update_args['LinodeID'] = linode_id
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Query Linode for all linodes on the account and shape the result into the
    salt-cloud node listing format. When ``full`` is True, the raw API node
    record is attached under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # IP data is keyed by linode id; nodes without an entry simply get no
        # private_ips/public_ips keys (same as the original loop-and-match).
        if linode_id in all_ips:
            entry['private_ips'] = all_ips[linode_id]['private_ips']
            entry['public_ips'] = all_ips[linode_id]['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API subject area (e.g. ``linode`` or ``avail``); combined with
        ``command`` into the ``api_action`` query parameter.

    command
        API command within ``action`` (e.g. ``list`` or ``boot``).

    args
        Extra query parameters; the API key and ``api_action`` are injected
        automatically.

    method
        HTTP method, defaults to ``GET``.

    header_dict
        Optional extra HTTP headers.

    data
        Optional request body.

    url
        The Linode API endpoint.

    Raises ``SaltCloudSystemExit`` on an expired/invalid API key and
    ``SaltCloudException`` for any other API-reported error.
    '''
    global LASTCALL

    # Credentials and throttle settings come from the provider config.
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Throttle: if the previous call happened within the same second, sleep
    # for the configured ratelimit interval before issuing this one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials are fatal; bail out immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    # Record the time of this call for the throttle check above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports host success, or time out.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)

    for _ in range(iterations):
        # NOTE(review): only the most recent job (index 0) is inspected —
        # presumably job_id is always the newest job when this is called.
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
        (The previous docstring said "when False", which contradicted the
        code below.)
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        # quiet=True -> DEBUG level; quiet=False -> INFO level.
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID

    Returns ``None`` when the code is unknown.
    '''
    status_id = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == status_id:
            return status_data['descr']
    # The previous fallback did LINODE_STATUS.get(status_id), but that table
    # is keyed by status *name* (a string), never by numeric code, so the
    # lookup always produced None. Return None explicitly.
    return None
def _get_status_id_by_name(status_name):
    '''
    Look up the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name

    Returns ``None`` when the name is unknown.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name satisfies Linode's labeling rules:
    3-48 characters, ASCII letters/digits/dashes/underscores, and it must
    begin and end with a letter or digit.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to SSH into: 'public_ips' (the default) or
    'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_data_disk_size
|
python
|
def get_data_disk_size(vm_, swap, linode_id):
'''
Return the size of of the data disk in MB
.. versionadded:: 2016.3.0
'''
disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
root_disk_size = config.get_cloud_config_value(
'disk_size', vm_, __opts__, default=disk_size - swap
)
return disk_size - root_disk_size - swap
|
Return the size of of the data disk in MB
.. versionadded:: 2016.3.0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L833-L843
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider configuration is present.
    '''
    configured = get_configured_provider()
    if configured is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, or False when the
    required keys are missing.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def avail_images(call=None):
    '''
    Return the Linode distributions available as images, keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return the available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: the old message misnamed this function as avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        ret[item['LABEL']] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if the Linode is running first. The API returns STATUS as an
    # integer (other functions in this module use int(node['STATUS'])); the
    # previous comparison against the string '1' never matched, so the
    # already-running guard was a no-op.
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The profile/VM definition dict supplied by salt-cloud.

    Returns the bootstrap result dict (updated with node details) on
    success, or ``False`` when validation or provisioning fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'linode',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Enforce Linode's label rules before touching the API.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) if one was given in the profile.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the datacenter if a location was given in the profile.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        # Fall back to the clone source's size/plan and location when the
        # profile did not specify them.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # API-level errors come back inside the response rather than as
    # exceptions; bail out on the first one.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # NOTE: a timeout here is logged but does not abort provisioning.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned node already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Create a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The API takes a comma-separated list of disk ids; the data disk is
    # optional and appended last when present.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk) for disk in disk_ids)

    config_args = {
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    }
    return _clean_data(_query('linode', 'config.create', args=config_args))
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    # A root password is mandatory for distribution deploys.
    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
        'rootPass': root_password,
    }

    # An SSH public key is optional.
    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Request a new private IP address for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Notify the event bus before and after the API call so reactors can
    # observe the full lifecycle of the destroy operation.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks asks Linode to delete the node even if it still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if __opts__.get('update_cachedir', False) is True:
        # Drop the minion's cachedir entry so cached queries stop reporting it.
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        # Resolving by name costs one extra API round trip.
        linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile returned is reported.
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a datacenter location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Swap partition size in MB, subtracted from the node's total capacity
        when computing the default.

    linode_id
        ID of the Linode whose total disk capacity provides the default.
    '''
    # Default root disk size is total capacity minus swap; a profile-level
    # 'disk_size' value overrides the computed default.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's ``image`` does not match any
    available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # No label matched. Use pprint.pformat (not pprint.pprint, which prints
    # to stdout and returns None) so the available-distributions listing
    # actually appears inside the raised error message.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses, keyed by Linode ID.

    linode_id
        Limits the IP addresses returned to the specified Linode ID, and
        flattens the result to a single mapping for that node.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')
    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        if node_id not in ret:
            ret[node_id] = {'public_ips': [], 'private_ips': []}
        bucket = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        ret[node_id][bucket].append(entry['IPADDRESS'])
    if linode_id:
        # A specific Linode was requested: collapse the per-ID dictionary
        # into a single {public_ips, private_ips} mapping.
        flat = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flat['private_ips'] = match['private_ips']
            flat['public_ips'] = match['public_ips']
        ret = flat
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    result = _query('linode', 'list', args={'LinodeID': linode_id})
    # Filtering linode.list by LinodeID yields a single record.
    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    # Exhausted the node list without a matching label.
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' takes precedence; 'passwd' is accepted as an alternate key.
    # Neither lookup searches the global cloud configuration.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output and returns the matching plan ID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format but still unknown: invalid plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings (the
                # original code only caught TypeError, so this fallback was
                # never reached); default to 0 so the lookup below fails
                # with the helpful "invalid plan" message instead.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # Old-style labels expressed the size in MB; convert to GB.
            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Corrected message: this is the get_plan_id function (the original
        # text wrongly referenced the 'show_instance' action).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # Translate outdated labels (e.g. 'Linode 2048') into the current API
    # format and resolve them to the numeric plan ID.
    label = _decode_linode_plan_label(label)
    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used, in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not specify 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM, in MB).

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): int() assumes the remainder of the label is purely
    # numeric (e.g. 'Linode 1024' -> '1024'); a label such as 'Linode 2GB'
    # would raise ValueError here -- confirm against current label formats.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: delegate to the shared helper without the raw records.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing: the shared helper attaches the raw API record as 'extra'.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    nodes = _query('linode', 'list')['DATA']
    # Map each node label to just its ID and human-readable state.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field filtering is delegated to the shared cloud utility; the fields
    # to keep come from the 'query.selection' option.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Corrected message: this is the reboot action (the original text
        # wrongly referenced 'show_instance').
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until Linode reports the reboot job finished (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    # Summarize the raw API record into the standard salt-cloud shape.
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Corrected message: this is the show_pricing function (the original
        # text wrongly referenced the 'show_instance' action).
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Hourly and monthly prices come from the API; the rest are derived.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 means the Linode is already running; skip the boot call.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job completes before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 means the Linode is already powered off; skip the API call.
    if node['STATUS'] == 2:
        # Include 'action' for consistency with the other return paths
        # (and with start()'s already-running branch).
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job completes before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    if update_args is None:
        # Guard against the default: the original unconditionally called
        # .update() on None, which raised AttributeError.
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    full
        When True, attach the raw API record for each node under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()
    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])
        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']
        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)
        # get_ips() keys its results by Linode ID rendered as text.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']
        if full:
            this_node['extra'] = node
        ret[node['LABEL']] = this_node
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API subsystem, e.g. 'linode' or 'avail'; combined with ``command``
        into the ``api_action`` query parameter.

    command
        API method within the subsystem, e.g. 'list'.

    args
        Query parameters; the API key and api_action are injected here.
    '''
    # LASTCALL holds the epoch second of the previous request, used below
    # for simple rate limiting.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # If another request already landed within this same second, pause for
    # the configured ratelimit_sleep before issuing a new one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials are fatal; abort immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5  # seconds between polls
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # Only the first returned entry is inspected; HOST_SUCCESS == 1
        # signals completion. NOTE(review): this assumes the target job is
        # listed first in job.list output -- confirm the API's ordering.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        # Default to waiting for the 'Brand New' state.
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    interval = 5  # seconds between polls
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status description by numeric status ID.

    status_id
        linode VM status ID
    '''
    code = int(status_id)
    for entry in six.itervalues(LINODE_STATUS):
        if entry['code'] == code:
            return entry['descr']
    # Fall back to a direct key lookup; returns None for unknown codes.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric linode status code by internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    # Labels must be 3-48 characters, alphanumeric with dashes/underscores,
    # and must begin and end with a letter or number.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_distribution_id
|
python
|
def get_distribution_id(vm_):
r'''
Returns the distribution ID for a VM
vm\_
The VM to get the distribution ID for
'''
distributions = _query('avail', 'distributions')['DATA']
vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
distro_id = ''
for distro in distributions:
if vm_image_name == distro['LABEL']:
distro_id = distro['DISTRIBUTIONID']
return distro_id
if not distro_id:
raise SaltCloudNotFound(
'The DistributionID for the \'{0}\' profile could not be found.\n'
'The \'{1}\' instance could not be provisioned. The following distributions '
'are available:\n{2}'.format(
vm_image_name,
vm_['name'],
pprint.pprint(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
)
)
|
r'''
Returns the distribution ID for a VM
vm\_
The VM to get the distribution ID for
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L846-L872
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made (read/written by _query for
# simple rate limiting)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric Linode API status code and a
# human-readable description. NOTE(review): the 'beeing_created' key is
# misspelled but kept as-is because entries are looked up by name at runtime.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider is configured.
    '''
    return __virtualname__ if get_configured_provider() is not False else False
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # A provider entry must supply both 'apikey' and 'password' to be valid.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    images = {}
    # Key each distribution record by its human-readable label.
    for entry in _query('avail', 'distributions')['DATA']:
        images[entry['LABEL']] = entry
    return images
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    locations = {}
    # Key each datacenter record by its location string.
    for entry in _query('avail', 'datacenters')['DATA']:
        locations[entry['LOCATION']] = entry
    return locations
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Corrected message: the original text wrongly said 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        # Key each plan record by its human-readable label.
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # linode_item is only used for human-readable log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): this compares STATUS against the string '1' while
        # other functions in this module compare it as an int -- confirm
        # which type the API returns here.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the boot job completes (or times out).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone an existing Linode.
    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three IDs are mandatory; reject the call if any is missing.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dictionary describing the machine to create. Must
        contain at least ``name``, and either ``image`` or ``clonefrom``.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the requested name is not a valid Linode label.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Cloning path: inherit size/plan/datacenter from the source Linode
        # when the profile did not specify them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # Surface any API-level errors that came back in the response body.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
        return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the new Linode to reach its initial ('brand_new') status.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # Cloned Linodes already carry a config profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.
    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.
        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )
    # Build the comma-separated disk list; the data disk is optional.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': ','.join('{0}'.format(disk) for disk in disk_ids),
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for a Linode from its configured distribution.
    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The size of the swap disk, in MB; used to compute the root disk size.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    # A root password is mandatory for Linode deployments.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    disk_args = {'rootPass': root_password}
    if pub_key:
        disk_args['rootSSHKey'] = pub_key
    disk_args.update({'LinodeID': linode_id,
                      'DistributionID': get_distribution_id(vm_),
                      'Label': vm_['name'],
                      'Size': get_disk_size(vm_, swap_size, linode_id)})
    return _clean_data(_query('linode', 'disk.createfromdistribution', args=disk_args))
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.
    vm\_
        The VM profile to create the swap disk for.
    linode_id
        The ID of the Linode to create the swap disk for.
    swap_size
        The size of the disk, in MB. Falls back to the profile's swap size.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)
    .. versionadded:: 2016.3.0
    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + '_data',
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Allocates a private IP address for the specified Linode.
    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.
    name
        The name of VM to be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Fire a pre-destroy event so reactors can act before the VM is deleted.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks is passed so the API deletes without its usual safety
    # checks — NOTE(review): confirm against the Linode v3 API docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache if cache updates are enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        # Resolving by name costs one extra API call.
        linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile returned by the API is considered.
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode DATACENTERID for the named datacenter.
    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB: the configured ``disk_size``, or
    the Linode's total disk space minus the swap size.
    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever disk space remains after
    the root disk and swap are accounted for.
    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    return total_hd - root_disk_size - swap
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses, grouped per Linode ID, or — when
    ``linode_id`` is given — a single mapping for just that Linode.
    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    query_args = {'LinodeID': linode_id} if linode_id else None
    response = _query('linode', 'ip.list', args=query_args)['DATA']
    ret = {}
    for entry in response:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        kind = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[kind].append(entry['IPADDRESS'])
    if linode_id:
        # Collapse the per-node mapping down to a single
        # {'public_ips': [...], 'private_ips': [...]} dict.
        node_ips = ret.get(six.text_type(linode_id))
        ret = {
            'public_ips': node_ips['public_ips'] if node_ips else [],
            'private_ips': node_ips['private_ips'] if node_ips else [],
        }
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the API data for a single named Linode.
    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if linode_id is None and name is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.
    name
        The name of the Linode from which to get the Linode ID. Required.
    Raises ``SaltCloudNotFound`` when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM, checking ``password`` first and
    falling back to the legacy ``passwd`` option.
    vm\_
        The configuration to obtain the password from.
    '''
    legacy_passwd = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_passwd,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output, returning the matching PLANID.
    label
        The label, or name, of the plan to decode.
    Example:
        `Linode 2048` will decode to `Linode 2GB`
    Raises ``SaltCloudException`` when the label cannot be matched to a
    plan returned by ``avail_sizes()``.
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            # Already in the new label format, but not a known plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUGFIX: int() raises ValueError (not TypeError) for a
                # non-numeric string such as 'Linode foo', so the previous
                # 'except TypeError' never caught the failure.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # BUGFIX: use floor division so Python 3 does not produce a
            # float label (e.g. 'Linode 2.0GB') that never matches the API.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.
    label
        The label, or name, of the plan to get the ID from.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUGFIX: the message previously referred to show_instance
        # (copy-paste error).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # _decode_linode_plan_label validates the label and returns the PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private IP address was requested for this VM via the
    ``assign_private_ip`` option; False otherwise.
    '''
    requested = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return requested
def get_data_disk(vm_):
    '''
    Return True if a data disk was requested via ``allocate_data_disk``.
    .. versionadded:: 2016.3.0
    '''
    requested = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return requested
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key (``ssh_pubkey``), if any.
    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB. Defaults to 128.
    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Returns the VM's size, as the RAM value of the matching Linode plan.
    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy profile sizes look like 'Linode 2048'; strip the prefix so the
    # remainder can be compared against the plan's RAM value.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): int(vm_size) assumes the remainder is purely numeric
    # (e.g. '2048'); a 'Linode 2GB'-style label would raise ValueError here
    # — TODO confirm the intended input format.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on the account.
    CLI Example:
    .. code-block:: bash
        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config
    .. note::
        Due to a limitation of the Linode API, the ``image`` field only
        reflects the distribution vendor (e.g. "Debian" or "RHEL"), not the
        actual image name.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared helper, requesting the brief listing.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    Return all available information for every Linode on the account.
    CLI Example:
    .. code-block:: bash
        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config
    .. note::
        Due to a limitation of the Linode API, the ``image`` field only
        reflects the distribution vendor (e.g. "Debian" or "RHEL"), not the
        actual image name.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared helper, requesting the full listing.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a minimal listing of the VMs on the provider: only each VM's name,
    id, and state. This is the minimum amount of information needed to check
    for existing VMs.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.
    .. versionadded:: 2015.8.0
    name
        The name of the VM to reboot.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUGFIX: the message previously referred to show_instance
        # (copy-paste error).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job reports success, or the wait times out.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.
    .. versionadded:: 2015.8.0
    name
        The name of the VM for which to display details.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a show_instance vm_name
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    # STATUS comes back as a numeric code; translate it to its description.
    state = int(node_data['STATUS'])
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUGFIX: the message previously referred to show_instance
        # (copy-paste error).
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive the daily/weekly/yearly figures from the API's hourly and
    # monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.
    name
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 means the Linode is already running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job reports success, or the wait times out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.
    name
        The name of the VM to stop.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 means the Linode is already powered off; nothing to do.
    if node['STATUS'] == 2:
        # CONSISTENCY: include the 'action' key, matching start()'s
        # already-running return shape.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job reports success, or the wait times out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.
    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUGFIX: the default update_args=None previously crashed with
    # AttributeError on .update(); also copy so the caller's dict is not
    # mutated as a side effect.
    args = {} if update_args is None else dict(update_args)
    args['LinodeID'] = linode_id
    result = _query('linode', 'update', args=args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that fetches every Linode on the account and formats it for the
    list_nodes* functions. When ``full`` is True, the raw API record is
    attached under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()
    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach the IP lists when this Linode has any recorded addresses.
        ip_info = all_ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject area (e.g. ``linode`` or ``avail``).
    command
        The API command within ``action`` (e.g. ``list`` or ``boot``);
        combined with ``action`` into the ``api_action`` parameter.
    args
        Query parameters to send. The configured API key and the
        ``api_action`` value are injected automatically when missing.
    method
        The HTTP method to use. Defaults to ``GET``.
    header_dict
        Extra HTTP headers to send.
    data
        The request body, if any.
    url
        The Linode API endpoint.

    Raises ``SaltCloudSystemExit`` on authentication failure and
    ``SaltCloudException`` for any other API-reported error.
    '''
    # LASTCALL records the time of the previous API call so back-to-back
    # calls can be throttled via the ``ratelimit_sleep`` option.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Sleep when another call happened within the same second, to respect
    # the configured rate limit.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # The Linode API signals failures in-band via a non-empty ERRORARRAY.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # A bad API key is fatal; abort instead of retrying.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports host success, or the wait
    times out.
    linode_id
        The ID of the Linode to wait on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        The number of seconds to poll before giving up.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # The most recent job is first; success requires HOST_SUCCESS == 1.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the given Linode reaches the requested status, or the
    wait times out.
    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status code to wait for. Defaults to the 'brand_new' code.
    timeout
        The number of seconds to poll before giving up.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    waiting_descr = _get_status_descr_by_id(status)
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        current_descr = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, waiting_descr
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.
    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # NOTE(review): LINODE_STATUS appears keyed by status *names*, so this
    # fallback lookup with a numeric ID will normally return None for an
    # unknown code — confirm whether a different fallback was intended.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name, or
    None when the name is unknown.
    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters: 3-48
    characters, ASCII letters/numbers/dashes/underscores, beginning and
    ending with a letter or number.
    .. versionadded:: 2015.5.6
    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_ips
|
python
|
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')
    ips = ips['DATA']
    ret = {}

    # Group addresses per Linode, keyed by the stringified node ID, split
    # into public vs. private lists based on the ISPUBLIC flag.
    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']
        ret = _all_ips

    return ret
|
Returns public and private IP addresses.
linode_id
Limits the IP addresses returned to the specified Linode ID.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L875-L912
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only advertise this driver when provider credentials are configured.
    return __virtualname__ if get_configured_provider() is not False else False
def get_configured_provider():
    '''
    Return the first configured instance.

    Returns the provider configuration dict when both required keys
    ('apikey' and 'password') are present, or False otherwise.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    # Key each distribution record by its human-readable label.
    distributions = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    # Key each datacenter record by its location string.
    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed copy-paste bug: the message previously named 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    # Key each plan record by its human-readable label.
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # Prefer the explicit ID; fall back to a name lookup (one extra API call).
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): compares STATUS to the string '1', while other
        # functions in this module (start/stop/show_instance) treat STATUS
        # as an int — confirm the API's actual type before relying on this.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    # Block until Linode reports the boot job as finished.
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory for the clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dict. Must contain ``name`` and either an ``image``
        or a ``clonefrom`` source Linode label.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                          __active_provider_name__ or 'linode',
                                                          vm_['profile'],
                                                          vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Reject names that don't meet Linode's label constraints up front.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # when the profile did not specify them explicitly.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API can also report failures inside a 200 response.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has a config profile; just look it up.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # DiskList is a comma-joined string; the optional data disk goes last.
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    else:
        disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for disk.createfromdistribution.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    args = {}
    if pub_key:
        args['rootSSHKey'] = pub_key
    args['rootPass'] = root_password
    args['LinodeID'] = linode_id
    args['DistributionID'] = get_distribution_id(vm_)
    args['Label'] = vm_['name']
    # Root disk takes everything not reserved for swap.
    args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        # Suffix distinguishes this disk's label from the root disk's.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size
    })

    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    kwargs = {'LinodeID': linode_id}
    result = _query('linode', 'ip.addprivate', args=kwargs)
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks tells Linode to delete even when disks are still attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Keep the salt-cloud minion cache consistent with the deletion.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first config profile's ID is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # Raises KeyError for unknown locations; create() relies on that.
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Swap space to reserve, in MB.

    linode_id
        The Linode whose total disk capacity is queried.
    '''
    # Default: everything the Linode has, minus the swap reservation.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    # Whatever remains after the (possibly overridden) root disk and swap.
    total = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total - swap
    )
    return total - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    # Return the ID of the first distribution whose label matches the image.
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match. BUG FIX: use pprint.pformat (returns the formatted string);
    # pprint.pprint prints to stdout and returns None, so the message used
    # to show 'None' instead of the list of available labels.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # linode.list with a LinodeID returns a single-element DATA list.
    result = _query('linode', 'list', args={'LinodeID': linode_id})

    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    # Scan the account's Linodes and return the first label match.
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' takes precedence; 'passwd' is accepted as a fallback alias.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already new-style but unknown: nothing to decode.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUG FIX: int() raises ValueError (not TypeError) on a
                # non-numeric string, so bad labels used to crash here
                # instead of being logged and rejected below.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # Linode rebranded the old 1GB plan as 'Nanode'.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy-paste bug: the message previously named 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels (e.g. 'Linode 2048') and resolve the plan ID.
    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested

    vm_
        The VM profile/config dict to read 'assign_private_ip' from.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0

    vm_
        The VM profile/config dict to read 'allocate_data_disk' from.
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Default swap size is 128 MB.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Legacy labels look like 'Linode 1024' (RAM in MB after the prefix).
    # NOTE(review): for new-style labels like 'Linode 2GB', int('2GB')
    # would raise ValueError before reaching the mismatch branch — confirm
    # which label styles callers pass here.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing only; full=True is exposed via list_nodes_full().
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Same backend as list_nodes(), but with every field included.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Map each label to just its ID and human-readable state.
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }

    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.

    The fields are taken from the 'query.selection' option in __opts__.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed copy-paste bug: the message previously named 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job finishes.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    # Condense the raw API record into salt-cloud's standard display shape.
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}

    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed copy-paste bug: the message previously named 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the API's hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 1 == "Running" (see LINODE_STATUS); nothing to do then.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    # linode.boot queues a job; block until it completes (or times out).
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 2 == "Powered Off" (see LINODE_STATUS); nothing to do then.
    if node['STATUS'] == 2:
        # CONSISTENCY FIX: include the 'action' key here as well, matching
        # the parallel already-running branch in start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    # linode.shutdown queues a job; block until it completes (or times out).
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    return {'action': 'stop',
            'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUG FIX: the default is None, so calling without update_args used to
    # raise AttributeError on NoneType.update(); start from an empty dict.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that fetches every Linode and formats it into the standard
    salt-cloud node mapping, keyed by label.

    full
        When True, attach the raw API record under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {}
        entry['id'] = linode_id
        entry['image'] = node['DISTRIBUTIONVENDOR']
        entry['name'] = node['LABEL']
        entry['size'] = node['TOTALRAM']
        entry['state'] = _get_status_descr_by_id(int(node['STATUS']))

        # Attach this node's addresses; the IP map is keyed by linode id.
        for ip_key, ip_val in six.iteritems(all_ips):
            if ip_key == linode_id:
                entry['private_ips'] = ip_val['private_ips']
                entry['public_ips'] = ip_val['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API action group (e.g. 'linode', 'avail'); combined with ``command``
        into the ``api_action`` query parameter.

    command
        API command within the action group (e.g. 'list', 'boot').

    args
        Query parameters; the API key and api_action are injected here.

    method
        HTTP method; non-POST requests ask for JSON back.

    Returns the decoded response dict, raising on API-reported errors.
    '''
    # Module-level timestamp used for crude rate limiting between calls.
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials/action without clobbering caller-supplied values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        # DELETE responses carry no JSON body to decode.
        decode = False

    # If a call already happened this second, sleep the configured amount.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    # Surface API-level errors: expired keys abort outright, everything
    # else is collected into a single SaltCloudException.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for the job, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True when the job succeeded, False when the timeout elapsed.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # Only the first entry of job.list is examined.
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the node reaches the given status code.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to 'brand_new'.

    timeout
        The amount of time to wait for the status, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the status matches, False when the timeout elapsed.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    waiting_for = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True

        current = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current, waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID

    Returns None when the code is unknown.
    '''
    code = int(status_id)
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == code:
            return status_data['descr']
    # DEAD-CODE FIX: this used to fall back to LINODE_STATUS.get(status_id),
    # but the table is keyed by status *names*, so a numeric id could never
    # match and the lookup always yielded None. Return None explicitly.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name,
    or None when the name is unknown.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when the label is acceptable; otherwise logs a warning
    and returns False.
    '''
    label = six.text_type(name)
    # Labels: 3-48 chars, ASCII alphanumerics plus '-'/'_', and the first
    # and last characters must be alphanumeric.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(label) <= 48 and bool(pattern.match(label))

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Profile/provider-level setting wins; fall back to public addresses.
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips', search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_linode
|
python
|
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        of ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        # Resolving by name costs one extra API round-trip.
        linode_id = get_linode_id_from_name(name)

    result = _query('linode', 'list', args={'LinodeID': linode_id})

    # linode.list filtered by LinodeID returns a single-element DATA list.
    return result['DATA'][0]
|
Returns data for a single named Linode.
name
The name of the Linode for which to get data. Can be used instead
of ``linode_id``. Note this will induce an additional API call
compared to using ``linode_id``.
linode_id
The ID of the Linode for which to get data. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_linode my-linode-config name=my-instance
salt-cloud -f get_linode my-linode-config linode_id=1234567
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L915-L955
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric code returned by linode.list
# plus a human-readable description.
# NOTE(review): 'beeing_created' is a long-standing typo; it is kept as-is
# because callers look entries up by name.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

__virtualname__ = 'linode'

# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only load this driver when a usable provider configuration exists.
    provider_config = get_configured_provider()
    return False if provider_config is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # The driver is only considered configured when both 'apikey' and
    # 'password' are present; is_provider_configured returns False otherwise.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')

    # Key each distribution record by its label.
    return dict((item['LABEL'], item) for item in response['DATA'])
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')

    # Key each datacenter record by its location string.
    return dict((item['LOCATION'], item) for item in response['DATA'])
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUG FIX: the message previously named 'avail_locations'
        # (copy/paste error); this is avail_sizes.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUG FIX: the guard used to compare against the string '1', but
        # other call sites in this module treat STATUS as numeric
        # (e.g. start() compares == 1 and _list_linodes() applies int()),
        # so the already-running check could never fire. Normalize first.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three identifiers are mandatory for linode.clone.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The salt-cloud VM profile dict; must contain at least 'name', and
        typically 'size', 'location', and either 'image' or 'clonefrom'.

    Returns the bootstrap result dict merged with the new node's data, or
    False when validation or provisioning fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) and datacenter ids from the profile.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/plan/datacenter from the clone source when not given.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)

    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # Disk order matters: root first, then swap, then the optional data disk.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The swap allocation, in MB; looked up from the profile when not
        given.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if not root_password:
        # disk.createfromdistribution requires a root password.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
        'rootPass': root_password,
    }
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; looked up from the profile when falsy.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    })
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    # The data disk label is derived from the VM name with a '_data' suffix.
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    })
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Create a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Fire the pre-destroy event before any API calls are made.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks=True deletes the Linode even if disks are still attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if __opts__.get('update_cachedir', False) is True:
        # Drop the minion's cached entry so stale data doesn't linger.
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        # Resolving by name costs one extra API round-trip.
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile of the Linode is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default the root disk to the plan's total disk minus the swap space,
    # unless the profile sets an explicit 'disk_size'.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever the root disk and swap don't claim goes to the data disk.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's image label does not match
    any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # BUG FIX: this used pprint.pprint(), which prints to stdout and
    # returns None, so the raised message contained 'None' instead of the
    # available distributions; pprint.pformat() returns the string.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID; when
        given, a flat {'public_ips': [...], 'private_ips': [...]} dict is
        returned instead of the per-node mapping.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for item in response['DATA']:
        node_id = six.text_type(item['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        key = 'public_ips' if item['ISPUBLIC'] == 1 else 'private_ips'
        bucket[key].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flat['private_ips'] = match['private_ips']
            flat['public_ips'] = match['public_ips']
        ret = flat

    return ret
def get_linode_id_from_name(name):
'''
Returns the Linode ID for a VM from the provided name.
name
The name of the Linode from which to get the Linode ID. Required.
'''
nodes = _query('linode', 'list')['DATA']
linode_id = ''
for node in nodes:
if name == node['LABEL']:
linode_id = node['LINODEID']
return linode_id
if not linode_id:
raise SaltCloudNotFound(
'The specified name, {0}, could not be found.'.format(name)
)
def get_password(vm_):
r'''
Return the password to use for a VM.
vm\_
The configuration to obtain the password from.
'''
return config.get_cloud_config_value(
'password', vm_, __opts__,
default=config.get_cloud_config_value(
'passwd', vm_, __opts__,
search_global=False
),
search_global=False
)
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label, or raises
    SaltCloudException when the label cannot be resolved to a known plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style 'GB' format but unknown: bail out.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings (and
                # TypeError only for non-string/non-number operands); the
                # original caught TypeError alone, so a label such as
                # 'Linode abc' crashed instead of hitting this fallback.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # The 1024 MB plan was rebranded 'Nanode 1GB' by Linode.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy/paste error: this message previously referred to
        # 'show_instance' instead of this function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
'''
Return True if a private ip address is requested
'''
return config.get_cloud_config_value(
'assign_private_ip', vm_, __opts__, default=False
)
def get_data_disk(vm_):
'''
Return True if a data disk is requested
.. versionadded:: 2016.3.0
'''
return config.get_cloud_config_value(
'allocate_data_disk', vm_, __opts__, default=False
)
def get_pub_key(vm_):
r'''
Return the SSH pubkey.
vm\_
The configuration to obtain the public key from.
'''
return config.get_cloud_config_value(
'ssh_pubkey', vm_, __opts__, search_global=False
)
def get_swap_size(vm_):
r'''
Returns the amoutn of swap space to be used in MB.
vm\_
The VM profile to obtain the swap size from.
'''
return config.get_cloud_config_value(
'swap', vm_, __opts__, default=128
)
def get_vm_size(vm_):
r'''
Returns the VM's size.
vm\_
The VM to get the size for.
'''
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
ram = avail_sizes()[vm_size]['RAM']
if vm_size.startswith('Linode'):
vm_size = vm_size.replace('Linode ', '')
if ram == int(vm_size):
return ram
else:
raise SaltCloudNotFound(
'The specified size, {0}, could not be found.'.format(vm_size)
)
def list_nodes(call=None):
'''
Returns a list of linodes, keeping only a brief listing.
CLI Example:
.. code-block:: bash
salt-cloud -Q
salt-cloud --query
salt-cloud -f list_nodes my-linode-config
.. note::
The ``image`` label only displays information about the VM's distribution vendor,
such as "Debian" or "RHEL" and does not display the actual image name. This is
due to a limitation of the Linode API.
'''
if call == 'action':
raise SaltCloudException(
'The list_nodes function must be called with -f or --function.'
)
return _list_linodes(full=False)
def list_nodes_full(call=None):
'''
List linodes, with all available information.
CLI Example:
.. code-block:: bash
salt-cloud -F
salt-cloud --full-query
salt-cloud -f list_nodes_full my-linode-config
.. note::
The ``image`` label only displays information about the VM's distribution vendor,
such as "Debian" or "RHEL" and does not display the actual image name. This is
due to a limitation of the Linode API.
'''
if call == 'action':
raise SaltCloudException(
'The list_nodes_full function must be called with -f or --function.'
)
return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Map each Linode label to just its ID and human-readable state.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
'''
Return a list of the VMs that are on the provider, with select fields.
'''
return __utils__['cloud.list_nodes_select'](
list_nodes_full(), __opts__['query.selection'], call,
)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed copy/paste error: this message previously referred to
        # 'show_instance' instead of the reboot action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    linode_id = get_linode_id_from_name(name)
    details = get_linode(kwargs={'linode_id': linode_id})
    addresses = get_ips(linode_id)

    return {
        'id': details['LINODEID'],
        'image': details['DISTRIBUTIONVENDOR'],
        'name': details['LABEL'],
        'size': details['TOTALRAM'],
        'state': _get_status_descr_by_id(int(details['STATUS'])),
        'private_ips': addresses['private_ips'],
        'public_ips': addresses['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed copy/paste error: this message previously referred to
        # 'show_instance' instead of this function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly estimates from the API's hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
'''
Start a VM in Linode.
name
The name of the VM to start.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
if call != 'action':
raise SaltCloudException(
'The start action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
node = get_linode(kwargs={'linode_id': node_id})
if node['STATUS'] == 1:
return {'success': True,
'action': 'start',
'state': 'Running',
'msg': 'Machine already running'}
response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
if _wait_for_job(node_id, response['JobID']):
return {'state': 'Running',
'action': 'start',
'success': True}
else:
return {'action': 'start',
'success': False}
def stop(name, call=None):
'''
Stop a VM in Linode.
name
The name of the VM to stop.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
if call != 'action':
raise SaltCloudException(
'The stop action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
node = get_linode(kwargs={'linode_id': node_id})
if node['STATUS'] == 2:
return {'success': True,
'state': 'Stopped',
'msg': 'Machine already stopped'}
response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
if _wait_for_job(node_id, response['JobID']):
return {'state': 'Stopped',
'action': 'stop',
'success': True}
else:
return {'action': 'stop',
'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the default: the original called update_args.update()
    # unconditionally, raising AttributeError when no args were passed.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
'''
Helper function to format and parse linode data
'''
nodes = _query('linode', 'list')['DATA']
ips = get_ips()
ret = {}
for node in nodes:
this_node = {}
linode_id = six.text_type(node['LINODEID'])
this_node['id'] = linode_id
this_node['image'] = node['DISTRIBUTIONVENDOR']
this_node['name'] = node['LABEL']
this_node['size'] = node['TOTALRAM']
state = int(node['STATUS'])
this_node['state'] = _get_status_descr_by_id(state)
for key, val in six.iteritems(ips):
if key == linode_id:
this_node['private_ips'] = val['private_ips']
this_node['public_ips'] = val['public_ips']
if full:
this_node['extra'] = node
ret[node['LABEL']] = this_node
return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The Linode API subject area (e.g. ``linode``, ``avail``); combined
        with ``command`` to form the ``api_action`` query parameter.
    command
        The API command within ``action`` (e.g. ``list``, ``boot``).
    args
        Query parameters to send. The provider's API key and the composed
        ``api_action`` are injected automatically when absent.
    method
        HTTP method to use (GET by default).
    header_dict
        Extra HTTP headers to send with the request.
    data
        Request body to send, if any.
    url
        Base URL of the Linode API endpoint.

    Returns the decoded JSON response as a dictionary. Raises
    SaltCloudSystemExit when the API reports an authentication failure and
    SaltCloudException for any other API-reported error.
    '''
    # LASTCALL records the epoch second of the previous API call for
    # client-side rate limiting.
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Every request must carry the account API key.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        # DELETE responses carry no JSON body to decode.
        decode = False

    # If the previous call happened within the same second, optionally
    # sleep before issuing this one to stay under the API rate limit.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    # Abort immediately on a bad/expired key.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success, or the timeout expires.

    linode_id
        The ID of the Linode to wait on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # The most recent job is first; success is signalled by HOST_SUCCESS.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
'''
Wait for a certain status from Linode.
linode_id
The ID of the Linode to wait on. Required.
status
The status to look for to update.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when False. Otherwise, logs to info.
'''
if status is None:
status = _get_status_id_by_name('brand_new')
status_desc_waiting = _get_status_descr_by_id(status)
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
result = get_linode(kwargs={'linode_id': linode_id})
if result['STATUS'] == status:
return True
status_desc_result = _get_status_descr_by_id(result['STATUS'])
time.sleep(interval)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Status for Linode %s is \'%s\', waiting for \'%s\'.',
linode_id, status_desc_result, status_desc_waiting
)
return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a status code.

    status_id
        linode VM status ID (integer status code from the API)

    Returns None when the code is unknown.
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # LINODE_STATUS is keyed by status *name* (a string), so the original
    # fallback lookup LINODE_STATUS.get(status_id) could never match an
    # integer code and always yielded None; make that explicit.
    return None
def _get_status_id_by_name(status_name):
'''
Return linode status description by internalstatus name
status_name
internal linode VM status name
'''
return LINODE_STATUS.get(status_name, {}).get('code', None)
def _validate_name(name):
    '''
    Checks whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    label = six.text_type(name)
    # Labels: 3-48 chars, ASCII alphanumerics plus '-'/'_' in the middle,
    # and must start and end with an alphanumeric character.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_linode_id_from_name
|
python
|
def get_linode_id_from_name(name):
'''
Returns the Linode ID for a VM from the provided name.
name
The name of the Linode from which to get the Linode ID. Required.
'''
nodes = _query('linode', 'list')['DATA']
linode_id = ''
for node in nodes:
if name == node['LABEL']:
linode_id = node['LINODEID']
return linode_id
if not linode_id:
raise SaltCloudNotFound(
'The specified name, {0}, could not be found.'.format(name)
)
|
Returns the Linode ID for a VM from the provided name.
name
The name of the Linode from which to get the Linode ID. Required.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L958-L976
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
'''
Check for Linode configs.
'''
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('apikey', 'password',)
)
def avail_images(call=None):
'''
Return available Linode images.
CLI Example:
.. code-block:: bash
salt-cloud --list-images my-linode-config
salt-cloud -f avail_images my-linode-config
'''
if call == 'action':
raise SaltCloudException(
'The avail_images function must be called with -f or --function.'
)
response = _query('avail', 'distributions')
ret = {}
for item in response['DATA']:
name = item['LABEL']
ret[name] = item
return ret
def avail_locations(call=None):
'''
Return available Linode datacenter locations.
CLI Example:
.. code-block:: bash
salt-cloud --list-locations my-linode-config
salt-cloud -f avail_locations my-linode-config
'''
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
response = _query('avail', 'datacenters')
ret = {}
for item in response['DATA']:
name = item['LOCATION']
ret[name] = item
return ret
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed copy/paste error: this message previously referred to
        # 'avail_locations' instead of this function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    # Key each plan by its human-readable label (e.g. 'Linode 2GB').
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.
    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.
    config_id
        The ID of the Config to boot. Required.
    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # The API reports STATUS as an integer (see start()/stop(), which
        # compare against 1/2). The original compared against the string
        # '1', which never matched, so an already-running Linode slipped
        # past this guard.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three identifiers are mandatory for the clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The profile/VM definition dict; must contain at least ``name``, and
        either an ``image`` or a ``clonefrom`` source.

    Returns the bootstrap result dict merged with the new node's details,
    or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Bail out early if the name does not satisfy Linode's label rules.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the requested plan (size) to a Linode PlanID, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    # Two creation paths: clone an existing Linode, or create from scratch.
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        # Inherit size/plan from the clone source when not given explicitly.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        # Inherit the datacenter from the clone source when not given.
        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API can return without raising yet still report errors.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for the new node to reach 'Brand New' status before configuring it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has a config profile; just look it up.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
'''
Creates a Linode Configuration Profile.
name
The name of the VM to create the config for.
linode_id
The ID of the Linode to create the configuration for.
root_disk_id
The Root Disk ID to be used for this config.
swap_disk_id
The Swap Disk ID to be used for this config.
data_disk_id
The Data Disk ID to be used for this config.
.. versionadded:: 2016.3.0
kernel_id
The ID of the kernel to use for this configuration profile.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The create_config function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
linode_id = kwargs.get('linode_id', None)
root_disk_id = kwargs.get('root_disk_id', None)
swap_disk_id = kwargs.get('swap_disk_id', None)
data_disk_id = kwargs.get('data_disk_id', None)
kernel_id = kwargs.get('kernel_id', None)
if kernel_id is None:
# 138 appears to always be the latest 64-bit kernel for Linux
kernel_id = 138
required_params = [name, linode_id, root_disk_id, swap_disk_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The create_config functions requires a \'name\', \'linode_id\', '
'\'root_disk_id\', and \'swap_disk_id\'.'
)
disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
if data_disk_id is not None:
disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
config_args = {'LinodeID': linode_id,
'KernelID': kernel_id,
'Label': name,
'DiskList': disklist
}
result = _query('linode', 'config.create', args=config_args)
return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap partition, in MB. Defaults to the profile's
        configured swap size when not supplied.
    '''
    kwargs = {}

    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory for provisioning a distribution
        # disk, so fail early with a configuration error.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for a Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Falls back to the profile's
        configured swap size when not supplied.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks allows deletion even when the Linode still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion's cache entry so queries no longer report it.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The size of the swap partition, in MB, subtracted from the total.

    linode_id
        The ID of the Linode whose total disk capacity is queried.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # An explicit 'disk_size' profile setting overrides the computed default.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever capacity remains
    after the root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises :class:`SaltCloudNotFound` when the profile's image label does
    not match any available Linode distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match found: report the available labels. pprint.pformat returns
    # the formatted string; the previous pprint.pprint call printed to
    # stdout and returned None, so the exception text always showed 'None'.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.

    When ``linode_id`` is given, returns a flat
    ``{'public_ips': [...], 'private_ips': [...]}`` dict; otherwise returns
    a dict keyed by Linode ID (as a string) of such dicts.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')

    ips = ips['DATA']
    ret = {}

    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']

        ret = _all_ips

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    result = _query('linode', 'list', args={'LinodeID': linode_id})

    return result['DATA'][0]
def get_password(vm_):
    r'''
    Return the root password configured for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is honored as a legacy alias when 'password' is not set.
    legacy_value = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_value,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label, or raises
    :class:`SaltCloudException` when the label cannot be resolved.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format, it simply does not exist.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # int() raises ValueError (not TypeError) on a non-numeric
                # string such as 'Linode foo'; fall back to 0 so the lookup
                # below fails with a helpful message.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Integer division keeps the label numeric ('2GB', not '2.0GB')
            # under Python 3's true division.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy/paste error: the message previously named 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    via the 'assign_private_ip' profile setting (defaults to False).
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested
    via the 'allocate_data_disk' profile setting (defaults to False).

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey from the 'ssh_pubkey' profile setting.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB (defaults to 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (its RAM allocation in MB).

    vm\_
        The VM to get the size for.

    The profile's 'size' label must match an entry from avail_sizes(), and
    for legacy 'Linode NNNN' labels the numeric suffix must equal the
    plan's RAM; otherwise SaltCloudNotFound is raised.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Map each Linode's label to its id and human-readable state.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    chosen by the 'query.selection' option.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously named 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}

    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed copy/paste error: the message previously named 'show_instance'
        # and called it an 'action' despite requiring -f/--function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive longer billing periods from the hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 == Running; no boot call is needed.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == Powered Off; no shutdown call is needed. The 'action'
    # key is included for consistency with start()'s early return.
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the documented default: calling with update_args=None previously
    # raised AttributeError on the .update() call below.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, attach the raw API node record under the 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])

        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # Merge in this node's addresses from the account-wide IP listing.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the Linode v3 ``api_action`` parameter as
        ``<action>.<command>``.

    args
        Query parameters; the API key and api_action are injected here.

    method
        HTTP method; DELETE responses are not JSON-decoded.

    Raises SaltCloudSystemExit on an expired/invalid API key and
    SaltCloudException for any other API-reported errors.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if another query completed within the same
    # second, pause for the configured ratelimit_sleep interval.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of the debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return. Polls every 5 seconds; returns True when the
    job reports success, False once ``timeout`` seconds have elapsed.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        # job.list returns the most recent job first.
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode. Polls every 5 seconds; returns
    True when the status matches, False once ``timeout`` seconds elapse.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to 'brand_new'.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # Fallback when no code matches. NOTE(review): LINODE_STATUS is keyed
    # by name strings, so looking it up with a numeric status_id always
    # yields None here -- confirm whether a name lookup was intended.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Valid labels are 3-48 chars, alphanumeric plus '-'/'_', and must
    # both start and end with an alphanumeric character.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_password
|
python
|
def get_password(vm_):
r'''
Return the password to use for a VM.
vm\_
The configuration to obtain the password from.
'''
return config.get_cloud_config_value(
'password', vm_, __opts__,
default=config.get_cloud_config_value(
'passwd', vm_, __opts__,
search_global=False
),
search_global=False
)
|
r'''
Return the password to use for a VM.
vm\_
The configuration to obtain the password from.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L979-L993
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made; used by _query() for
# crude one-call-per-second rate limiting.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# NOTE: the 'beeing_created' key's spelling is historical -- entries are
# looked up by name via _get_status_id_by_name(), so renaming it would be
# a behavior change.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only advertise the driver when a Linode provider is configured.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Requires both 'apikey' and 'password' to be set on the provider.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')

    # Key each distribution record by its label.
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')

    # Key each datacenter record by its location name.
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed copy/paste error: the message previously named 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )

        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )

    # linode_item is only used for human-readable log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): this compares STATUS to the string '1' while
        # start() and stop() compare the same field to the integers 1
        # and 2 -- confirm which type the API actually returns.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three identifiers are mandatory for the API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\\_
        The VM profile dictionary. Reads ``name``, ``profile``, ``size``,
        ``location``, ``image`` and (optionally) ``clonefrom``; mutates it
        in place with ``ssh_host``, ``password``, ``public_ips`` and
        ``private_ips`` before bootstrapping.

    Returns the bootstrap result dict merged with basic instance data, or
    False on any failure (bad profile, invalid label, API error).
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the requested label does not satisfy Linode's
    # labeling rules (see _validate_name).
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        # Size/plan/location fall back to the clone source's values when the
        # profile does not override them.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the new Linode to reach its initial ('brand_new') state;
    # failure here is logged but does not abort provisioning.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already carries a configuration profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)

    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The API expects a comma-separated list of disk ids; the data disk is
    # appended only when one was supplied.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': ','.join(str(disk) for disk in disk_ids),
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the chosen distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; looked up from the profile when
        not given. Used to size the root disk.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    kwargs = {}
    if pub_key:
        kwargs['rootSSHKey'] = pub_key
    if root_password:
        kwargs['rootPass'] = root_password
    else:
        # A root password is mandatory for disk.createfromdistribution.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    kwargs['LinodeID'] = linode_id
    kwargs['DistributionID'] = get_distribution_id(vm_)
    kwargs['Label'] = vm_['name']
    kwargs['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; looked up from the profile when falsy.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    args = {
        'LinodeID': linode_id,
        # Suffix distinguishes the data disk from the root disk's label.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # skipChecks tells the API to delete even when disks are still attached.
    response = _query(
        'linode',
        'delete',
        args={'LinodeID': get_linode_id_from_name(name), 'skipChecks': True}
    )

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name, __active_provider_name__.split(':')[0], __opts__
        )

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile of the Linode is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when ``location`` is not a known datacenter name
    (callers such as create() rely on catching this).
    '''
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The swap disk size in MB, subtracted from the Linode's total disk
        space to produce the default root disk size.

    linode_id
        The ID of the Linode whose total disk ('TOTALHD') provides the default.
    '''
    # Default to "everything that is not swap" unless the profile sets an
    # explicit 'disk_size'.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of of the data disk in MB

    .. versionadded:: 2016.3.0

    The data disk gets whatever space remains on the Linode after the root
    disk (profile 'disk_size', defaulting to total minus swap) and the swap
    disk are accounted for.
    '''
    total = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total - swap
    )
    return total - root - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's 'image' does not match any
    distribution label returned by the API.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No match found: list the available labels in the error message.
    # pprint.pformat() is used here because pprint.pprint() prints to stdout
    # and returns None, which would render 'None' inside the message.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, the return value is a flat ``{'public_ips': [...],
        'private_ips': [...]}`` dict instead of a per-Linode mapping.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')

    ret = {}
    for item in ips['DATA']:
        node_id = six.text_type(item['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if item['ISPUBLIC'] == 1:
            bucket['public_ips'].append(item['IPADDRESS'])
        else:
            bucket['private_ips'].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            flat['private_ips'] = matching_id['private_ips']
            flat['public_ips'] = matching_id['public_ips']
        ret = flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        # Resolve the name to an id with one extra API round-trip.
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label, raising
    SaltCloudException when it cannot be matched against avail_sizes().
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format but unknown: nothing to decode.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                # Old-style labels carry the RAM size in MB, e.g. 'Linode 2048'.
                # int() raises ValueError (not TypeError) on a non-numeric string.
                plan_size = int(plan[1])
            except ValueError:
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Integer division: on Python 3, '/' would produce a float and
            # render e.g. 'Linode 2.0GB', which never matches the API labels.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: message previously referred to 'show_instance'
            # (copy-paste error).
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate old-style labels (e.g. 'Linode 1024') and resolve to PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested

    Reads the ``assign_private_ip`` option from the profile/provider
    configuration; defaults to False when unset.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0

    Reads the ``allocate_data_disk`` option from the profile/provider
    configuration; defaults to False when unset.
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.

    Reads the ``ssh_pubkey`` option; presumably None when unset -- callers
    (create_disk_from_distro) treat a falsy result as "no key configured".
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.

    Reads the ``swap`` option; defaults to 128 MB when unset.
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.

    Returns the plan's RAM (in MB) when it matches the numeric part of the
    profile's 'size' label; otherwise raises SaltCloudNotFound.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # NOTE(review): only the 'Linode ' prefix is stripped, so an old-style
    # label like 'Linode 1024' parses cleanly, but a new-style label such as
    # 'Linode 2GB' leaves '2GB' and makes int() below raise ValueError --
    # confirm whether new-style labels are expected to reach this function.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: delegate to the shared helper without the 'extra' data.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing: includes the raw API record under each node's 'extra' key.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Map label -> {id, state} straight from the raw API listing.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.

    The fields are chosen via the ``query.selection`` configuration option;
    filtering is delegated to the generic cloud helper.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            # Fixed: message previously referred to 'show_instance'
            # (copy-paste error).
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        raise SaltCloudException(
            # Fixed: message previously referred to 'show_instance'
            # (copy-paste error).
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Hourly and monthly prices come from the API; the remaining figures
    # are derived from the hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 means the Linode is already running; nothing to do.
    # NOTE(review): this compares against the int 1 while boot() compares the
    # same field against the string '1' -- confirm which form the API returns.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 means the Linode is already powered off; nothing to do.
    if node['STATUS'] == 2:
        # 'action' key added for consistency with the other return branches
        # and with start()'s equivalent short-circuit.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the documented default: calling .update() on None would raise
    # AttributeError.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    When ``full`` is True, each entry also carries the raw API record under
    the 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        node_ips = ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the ``api_action`` query parameter as
        ``<action>.<command>`` (e.g. 'linode.boot').

    args
        Query parameters for the request; the provider's API key and the
        api_action are injected when absent.

    Returns the decoded JSON body ('dict' key of the http.query result) and
    raises on any error the Linode API reports in ERRORARRAY.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if another call happened within the same second,
    # sleep for the configured 'ratelimit_sleep' before issuing this one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep credentials and passwords out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Abort immediately on a bad API key.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the most recent job matches ``job_id`` with
    HOST_SUCCESS, or False when the timeout elapses.
    '''
    interval = 5
    attempts = int(timeout / interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new'
        status code when not provided.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the Linode reports the requested status, or False
    when the timeout elapses.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status description by numeric ID.

    status_id
        linode VM status ID

    Returns None when no LINODE_STATUS entry carries that code.
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # LINODE_STATUS is keyed by status *name* (see _get_status_id_by_name),
    # so the old fallback of looking the numeric id up in it could never
    # match; return None explicitly instead.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric linode status code for an internal status name.

    status_name
        internal linode VM status name

    Returns None when ``status_name`` is not a key of LINODE_STATUS.
    '''
    return LINODE_STATUS.get(status_name, {}).get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when the label is 3-48 characters of ASCII letters,
    digits, dashes or underscores, beginning and ending with a letter or
    digit; otherwise logs a warning and returns False.
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and label_re.match(name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.

    Reads the ``ssh_interface`` option from the profile/provider
    configuration (not searched globally).
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_decode_linode_plan_label
|
python
|
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used in Linode API output, and return the matching plan ID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Raises SaltCloudException when the label cannot be matched to any
    available plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<type> <N>GB" format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings (the common
                # failure here); the original only caught TypeError, so bad
                # labels crashed instead of being reported cleanly.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Floor division: under Python 3 the original ``/`` produced a
            # float ("Linode 2.0GB"), which never matches an API label.
            # ``//`` matches the Python 2 integer-division behavior.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
|
Attempts to decode a user-supplied Linode plan label
into the format in Linode API output
label
The label, or name, of the plan to decode.
Example:
`Linode 2048` will decode to `Linode 2GB`
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L996-L1049
|
[
"def avail_sizes(call=None):\n '''\n Return available Linode sizes.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud --list-sizes my-linode-config\n salt-cloud -f avail_sizes my-linode-config\n '''\n if call == 'action':\n raise SaltCloudException(\n 'The avail_locations function must be called with -f or --function.'\n )\n\n response = _query('avail', 'LinodePlans')\n\n ret = {}\n for item in response['DATA']:\n name = item['LABEL']\n ret[name] = item\n\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric code returned by the Linode
# API ('code') and a human-readable description ('descr').
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): 'beeing_created' is misspelled, but the key is kept
    # verbatim since lookups elsewhere may use this exact name -- confirm
    # before renaming.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# Name under which this driver registers with salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Load this module only when a Linode provider configuration is present.
    '''
    configured = get_configured_provider()
    return False if configured is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def avail_images(call=None):
    '''
    Return the Linode distribution images that are available.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its human-readable LABEL.
    return {item['LABEL']: item for item in distributions}
def avail_locations(call=None):
    '''
    Return the Linode datacenter locations that are available.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its LOCATION string.
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # The original error message named avail_locations; corrected to
        # reference this function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode and wait for the boot job to finish.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10

    Returns True on successful boot, False when the boot job fails.
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # When invoked with -f, the target may be named via kwargs instead.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared to the *string* '1' here, while
        # start()/stop() compare against the int 1/2 -- confirm which type
        # the API actually returns; one of the two comparisons never fires.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone an existing Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM: provision (or clone) the instance, label it,
    attach IPs/disks/config as needed, boot it, and bootstrap Salt onto it.

    vm_
        The VM profile dictionary (must contain at least 'name'; 'size',
        'location', 'image' or 'clonefrom' drive the provisioning path).

    Returns the bootstrap result dict merged with the node data, or False
    when validation or provisioning fails.
    '''
    name = vm_['name']

    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the requested plan (size) if one was given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the requested datacenter if a location was given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # unless explicitly overridden in the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API can report errors in-band via ERRORARRAY; bail on the first.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait until the new node reports the 'brand_new' status before touching it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Cloned nodes already carry a config profile; reuse its ID.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Create a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

    .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # Build the comma-separated disk list; the data disk is optional.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist,
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the Linode's root disk from the configured distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    disk_args = {}

    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    root_password = get_password(vm_)
    if root_password:
        disk_args['rootPass'] = root_password
    else:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args['LinodeID'] = linode_id
    disk_args['DistributionID'] = get_distribution_id(vm_)
    disk_args['Label'] = vm_['name']
    # Root disk gets whatever space is left after reserving swap.
    disk_args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the Linode (the filesystem type is hardcoded to
    ext4 at the moment).

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    })

    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Allocate a private IP address for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name

    Fires 'destroying'/'destroyed' cloud events around the API call and
    purges the minion cache entry when 'update_cachedir' is enabled.
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks=True bypasses Linode's "disks still attached" safety check.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Return the config_id for a given Linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used
        instead of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used
        instead of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile is reported.
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a datacenter location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default: all remaining space after reserving swap.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is not claimed by the root disk or swap becomes the data disk.
    return total_hd - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for a VM.

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound when the profile's image does not match any
    available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # Bug fix: the original interpolated ``pprint.pprint(...)``, which prints
    # to stdout and returns None, so the exception message always showed
    # 'None' instead of the available distributions. pformat returns the
    # formatted string for embedding.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses, either for all Linodes on the
    account or for a single Linode.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_records = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_records = _query('linode', 'ip.list')['DATA']

    ret = {}
    for record in ip_records:
        node_id = six.text_type(record['LINODEID'])
        bucket = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        node_entry = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_entry[bucket].append(record['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        matching = ret.get(six.text_type(linode_id))
        if matching:
            flattened['private_ips'] = matching['private_ips']
            flattened['public_ips'] = matching['public_ips']
        ret = flattened

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the API record for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        of ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the VM carrying the given label.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode has that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is accepted as a fallback spelling for 'password'.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def get_plan_id(kwargs=None, call=None):
    '''
    Return the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # The original error message referenced show_instance; corrected to
        # name this function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Decodes legacy labels ('Linode 2048') to API labels and returns PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True when a private IP address was requested for this VM.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip',
        vm_,
        __opts__,
        default=False,
    )
def get_data_disk(vm_):
    '''
    Return True when a data disk was requested for this VM.

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk',
        vm_,
        __opts__,
        default=False,
    )
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey',
        vm_,
        __opts__,
        search_global=False,
    )
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB (defaults to 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value('swap', vm_, __opts__, default=128)
def get_vm_size(vm_):
    r'''
    Return the VM's size (its RAM amount in MB).

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # NOTE(review): this parses legacy 'Linode <MB>' labels; a newer label
    # such as 'Linode 2GB' would make int() raise ValueError below --
    # confirm callers only pass legacy-format sizes.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram == int(vm_size):
        return ram

    raise SaltCloudNotFound(
        'The specified size, {0}, could not be found.'.format(vm_size)
    )
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List Linodes with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a minimal listing of the VMs on the provider: only names, IDs,
    and states -- the least information needed to detect existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    nodes = _query('linode', 'list')['DATA']
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs on the provider, restricted to the fields
    configured in ``query.selection``.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # The original error message referenced show_instance; corrected to
        # name this action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # The original error message referenced 'show_instance action';
        # corrected to name this function and the -f/--function invocation.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Hourly/monthly rates come from the API; derive the rest.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS code 1 == Running (see LINODE_STATUS).
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 is 'Powered Off' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 2:
        return {'success': True,
                # Consistency fix: include 'action' like every other return
                # path of start()/stop() so callers can rely on the key.
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Block until the shutdown job reports success (or the wait times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Bug fix: the default of None was passed straight to .update(), which
    # raised AttributeError when the argument was omitted.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, the raw API record for each node is attached under the
        ``extra`` key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])

        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # NOTE: ip keys are only set when the node appears in get_ips();
        # a node with no IP records has no 'private_ips'/'public_ips' keys.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        Top-level API group (e.g. ``linode``, ``avail``).

    command
        API command within the group (e.g. ``list``, ``boot``); combined
        with ``action`` to form the ``api_action`` query parameter.

    args
        Query parameters to send. ``api_key`` and ``api_action`` are
        injected automatically when missing.

    method
        HTTP method to use. Defaults to ``GET``.

    header_dict
        Extra HTTP headers to send.

    data
        Request body data.

    url
        Base URL of the Linode API endpoint.

    Raises ``SaltCloudSystemExit`` on authentication failure and
    ``SaltCloudException`` for any other API-reported error.
    '''
    global LASTCALL

    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Simple client-side rate limiting: if the previous call happened within
    # the same second, pause for the configured ratelimit_sleep.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # An invalid key makes every later call fail too; abort.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    Returns True as soon as the most recent job for the Linode matches
    ``job_id`` and reports host success; returns False if ``timeout``
    seconds elapse first.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # job.list returns newest first; only the most recent entry is checked.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    Polls every 5 seconds until the Linode's STATUS equals ``status``;
    returns True on match, False once ``timeout`` seconds have elapsed.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a status code.

    status_id
        linode VM status ID
    '''
    wanted = int(status_id)
    for entry in LINODE_STATUS.values():
        if entry['code'] == wanted:
            return entry['descr']
    # No entry carries this code; fall through to the (name-keyed) table,
    # preserving the original function's final lookup.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    # Labels must be 3-48 characters, consist of ASCII letters, digits,
    # dashes and underscores, and start/end with a letter or digit.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_plan_id
|
python
|
def get_plan_id(kwargs=None, call=None):
'''
Returns the Linode Plan ID.
label
The label, or name, of the plan to get the ID from.
CLI Example:
.. code-block:: bash
salt-cloud -f get_plan_id linode label="Linode 1024"
'''
if call == 'action':
raise SaltCloudException(
'The show_instance action must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
label = kwargs.get('label', None)
if label is None:
raise SaltCloudException(
'The get_plan_id function requires a \'label\'.'
)
label = _decode_linode_plan_label(label)
return label
|
Returns the Linode Plan ID.
label
The label, or name, of the plan to get the ID from.
CLI Example:
.. code-block:: bash
salt-cloud -f get_plan_id linode label="Linode 1024"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1052-L1081
|
[
"def _decode_linode_plan_label(label):\n '''\n Attempts to decode a user-supplied Linode plan label\n into the format in Linode API output\n\n label\n The label, or name, of the plan to decode.\n\n Example:\n `Linode 2048` will decode to `Linode 2GB`\n '''\n sizes = avail_sizes()\n\n if label not in sizes:\n if 'GB' in label:\n raise SaltCloudException(\n 'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)\n )\n else:\n plan = label.split()\n\n if len(plan) != 2:\n raise SaltCloudException(\n 'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)\n )\n\n plan_type = plan[0]\n try:\n plan_size = int(plan[1])\n except TypeError:\n plan_size = 0\n log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)\n\n if plan_type == 'Linode' and plan_size == 1024:\n plan_type = 'Nanode'\n\n plan_size = plan_size/1024\n new_label = \"{} {}GB\".format(plan_type, plan_size)\n\n if new_label not in sizes:\n raise SaltCloudException(\n 'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)\n )\n\n log.warning(\n 'An outdated Linode plan label was detected in your Cloud '\n 'Profile (%s). Please update the profile to use the new '\n 'label format (%s) for the requested Linode plan size.',\n label, new_label\n )\n\n label = new_label\n\n return sizes[label]['PLANID']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
'''
Check for Linode configs.
'''
if get_configured_provider() is False:
return False
return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider = __active_provider_name__ or __virtualname__
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(__opts__, provider, required_keys)
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')
    # Key each distribution record by its human-readable label.
    return dict((item['LABEL'], item) for item in response['DATA'])
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')
    # Key each datacenter record by its location name.
    return dict((item['LOCATION'], item) for item in response['DATA'])
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # Bug fix: this message previously referenced 'avail_locations'.
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for human-readable log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared as the string '1' here, while
        # start()/stop() compare it as an int — confirm the API field type.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory for the API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The VM profile dictionary; must include at least ``name``, and
        either an ``image`` or a ``clonefrom`` source.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Bail out early if the label would be rejected by Linode anyway.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) from the profile, if one was given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the datacenter from the profile's location, if one was given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: derive missing size/location from the source Linode.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )

    # DiskList is a comma-separated string of disk IDs, root first.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }

    result = _query('linode', 'config.create', args=config_args)

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The swap size in MB, used to compute the root disk size.
    '''
    kwargs = {}

    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # Linode requires a root password for distribution deploys.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.
        NOTE(review): defaults to None but is dereferenced unconditionally —
        callers must always supply it.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    kwargs = {}

    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name']+"_data",
                   'Type': 'ext4',
                   'Size': data_size
                   })

    result = _query('linode', 'disk.create', args=kwargs)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Create a private IP address for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks bypasses Linode's safety checks so delete works even when
    # the Linode still has disks attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion from the salt-cloud cache, if caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first (primary) configuration profile is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a datacenter location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    # Raises KeyError for unknown locations; create() catches this to fall
    # back to a default datacenter.
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default: everything left on the Linode after carving out swap space.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # The data disk gets whatever remains after the root and swap disks.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # Bug fix: pprint.pprint() prints to stdout and returns None,
                # so the error message used to read 'available:\nNone'.
                # pprint.pformat() returns the formatted string instead.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    Without ``linode_id`` the return value is a dict keyed by Linode ID
    (as a string), each value being
    ``{'public_ips': [...], 'private_ips': [...]}``. With ``linode_id``
    only that single flat ``{'public_ips': ..., 'private_ips': ...}``
    dict is returned.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')

    ips = ips['DATA']
    ret = {}

    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        # ISPUBLIC == 1 marks a public address; anything else is private.
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']

        ret = _all_ips

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    # 'linode_id' is preferred; resolving from 'name' costs an extra call.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    result = _query('linode', 'list', args={'LinodeID': linode_id})

    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the VM with the given label.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' wins; the legacy 'passwd' option is the fallback default.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label; raises
    ``SaltCloudException`` when the label cannot be matched.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format but still unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUG FIX: int() raises ValueError for non-numeric strings;
                # the original caught only TypeError, so this fallback path
                # could never run.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # BUG FIX: use floor division; under Python 3 true division
            # produced e.g. 2.0 and the label 'Linode 2.0GB' never matched.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_private_ip(vm_):
    '''
    Return the profile's 'assign_private_ip' setting (True when a private
    IP address was requested; defaults to False).
    '''
    return config.get_cloud_config_value(
        'assign_private_ip',
        vm_,
        __opts__,
        default=False,
    )
def get_data_disk(vm_):
    '''
    Return the profile's 'allocate_data_disk' setting (True when a data
    disk was requested; defaults to False).

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk',
        vm_,
        __opts__,
        default=False,
    )
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key for a VM.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey',
        vm_,
        __opts__,
        search_global=False,
    )
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    return config.get_cloud_config_value(
        'swap',
        vm_,
        __opts__,
        default=128,
    )
def get_vm_size(vm_):
    r'''
    Return the VM's size (the plan's RAM, in MB).

    vm\_
        The VM to get the size for.

    Raises ``SaltCloudNotFound`` when the numeric part of the configured
    size label does not match the plan's RAM as reported by the API.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Strip the 'Linode ' prefix so the remainder compares numerically.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (such as
        "Debian" or "RHEL"), not the actual image name — a limitation of
        the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List the Linodes on the account with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only shows the distribution vendor (such as
        "Debian" or "RHEL"), not the actual image name — a limitation of
        the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return the bare minimum about each VM on the provider: its name,
    ID, and state. This is the cheapest way to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return the VMs on the provider, limited to the fields chosen by the
    'query.selection' option.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUG FIX: the message previously (and wrongly) named show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until Linode reports the reboot job finished successfully.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only shows the distribution vendor (such as
        "Debian" or "RHEL"), not the actual image name — a limitation of
        the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUG FIX: the message previously (and wrongly) named show_instance
        # and called it an 'action'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive daily/weekly/yearly figures from the API's hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 means 'Running' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Block until the boot job completes (or the wait times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 means 'Powered Off' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 2:
        # CONSISTENCY FIX: include the 'action' key here, mirroring the
        # shape of start()'s early return and the returns below.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Block until the shutdown job completes (or the wait times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUG FIX: the None default followed by .update() raised AttributeError
    # whenever the argument was omitted; normalize to an empty dict first.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Fetch all Linodes on the account and shape them for salt-cloud output.

    full
        When True, attach the raw API record under an 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # get_ips() keys its results by stringified Linode ID.
        ip_info = ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject area (e.g. 'linode', 'avail'); combined with
        ``command`` into the ``api_action`` request parameter.
    command
        The API command within ``action`` (e.g. 'list', 'boot').
    args
        Dict of query parameters; the API key and api_action are merged in.
    method
        HTTP method to use; DELETE responses are not JSON-decoded.
    header_dict
        Extra HTTP headers to send.
    data
        Request body, passed through to the HTTP layer.
    url
        Base API endpoint.

    Returns the decoded JSON response (the 'dict' member of the HTTP
    result). Raises SaltCloudSystemExit on authentication failure and
    SaltCloudException when the API reports any other error.
    '''
    global LASTCALL  # module-level epoch second of the previous API call

    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Merge authentication and the api_action without clobbering
    # caller-supplied values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        # DELETE responses carry no JSON body worth decoding.
        decode = False

    # Crude rate limiting: if a call already happened during this same
    # second, sleep for the configured interval before issuing another.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    # The legacy Linode API reports failures in-band via ERRORARRAY.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad/expired key: abort immediately instead of
                    # collecting further messages.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success, or give up.

    linode_id
        The ID of the Linode the job belongs to. Required.
    job_id
        The ID of the job to wait for.
    timeout
        Maximum number of seconds to keep polling.
    quiet
        Log progress at debug level when True, info level otherwise.

    Returns True once the job's HOST_SUCCESS flag is set, False on timeout.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the node reaches the given status, or give up.

    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status code to wait for (defaults to 'brand_new').
    timeout
        Maximum number of seconds to keep polling.
    quiet
        Log progress at debug level when True, info level otherwise.

    Returns True once the status matches, False on timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID
    '''
    for _, entry in six.iteritems(LINODE_STATUS):
        if entry['code'] == int(status_id):
            return entry['descr']
    # No code matched; fall back to a direct key lookup (yields None for
    # the integer IDs this function is normally given).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # A label must be 3-48 characters and match the pattern above.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if valid is False:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to use for the SSH connection: either
    'public_ips' (the default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
get_vm_size
|
python
|
def get_vm_size(vm_):
r'''
Returns the VM's size.
vm\_
The VM to get the size for.
'''
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
ram = avail_sizes()[vm_size]['RAM']
if vm_size.startswith('Linode'):
vm_size = vm_size.replace('Linode ', '')
if ram == int(vm_size):
return ram
else:
raise SaltCloudNotFound(
'The specified size, {0}, could not be found.'.format(vm_size)
)
|
r'''
Returns the VM's size.
vm\_
The VM to get the size for.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1128-L1146
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def avail_sizes(call=None):\n '''\n Return available Linode sizes.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud --list-sizes my-linode-config\n salt-cloud -f avail_sizes my-linode-config\n '''\n if call == 'action':\n raise SaltCloudException(\n 'The avail_locations function must be called with -f or --function.'\n )\n\n response = _query('avail', 'LinodePlans')\n\n ret = {}\n for item in response['DATA']:\n name = item['LABEL']\n ret[name] = item\n\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made; _query() uses this for
# crude per-second rate limiting.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric code found in the API's STATUS
# field plus a description. Looked up by code in _get_status_descr_by_id()
# and by name in _get_status_id_by_name().
# NOTE(review): the 'beeing_created' key is misspelled but potentially
# load-bearing for external lookups — confirm before renaming.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
# Driver name salt-cloud uses to reference this module.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this driver when a Linode provider is configured.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance (or False when
    the required 'apikey' and 'password' values are missing).
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',),
    )
def avail_images(call=None):
    '''
    Return available Linode images (distributions), keyed by label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')

    return dict((item['LABEL'], item) for item in response['DATA'])
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')

    return dict((item['LOCATION'], item) for item in response['DATA'])
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUG FIX: the message previously (and wrongly) named avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUG FIX: STATUS is an integer in the API response (start() and
        # show_instance() treat it as one), so the original string
        # comparison ``status == '1'`` never matched and this guard was
        # ineffective.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    params = kwargs or {}
    linode_id = params.get('linode_id', None)
    datacenter_id = params.get('datacenter_id', None)
    plan_id = params.get('plan_id', None)

    if any(value is None for value in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The salt-cloud VM dictionary assembled from the profile/provider
        configuration. Must contain at least 'name'; 'size', 'location',
        'image', and 'clonefrom' drive the provisioning path.

    Returns the bootstrap result dict (augmented with node details), or
    False when validation or the initial API call fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Reject names that don't fit Linode's label constraints up front.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) and datacenter from the profile, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # when the profile does not override them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # In-band API errors: log the first one and bail out.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for 'brand_new' status; a timeout is logged but not fatal.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A clone already carries a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    params = kwargs or {}
    name = params.get('name', None)
    linode_id = params.get('linode_id', None)
    root_disk_id = params.get('root_disk_id', None)
    swap_disk_id = params.get('swap_disk_id', None)
    data_disk_id = params.get('data_disk_id', None)
    kernel_id = params.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # DiskList is a comma-separated string of disk IDs.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    kwargs = {}

    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for the Linode API; the SSH key is optional.
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    # Root disk size is the plan's total disk minus swap (see get_disk_size).
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create a swap disk for the given Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Create a private IP address for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Fire a salt event before the API call so reactors can observe it.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks lets Linode delete the instance even with disks attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion from the salt-cloud cache, if caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    # Prefer the explicit ID; looking it up by name costs one extra API call.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile's ID is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    swap
        The amount of swap space, in MB, reserved out of the total disk.
    linode_id
        The ID of the Linode whose total disk size is queried.
    '''
    # Default to the plan's full disk minus swap unless the profile overrides.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever is left of the plan's
    total disk after the root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Return the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises ``SaltCloudNotFound`` when the profile's ``image`` does not match
    any available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # pprint.pformat returns the formatted string; the previous
                # pprint.pprint call printed to stdout and returned None,
                # leaving the literal 'None' in this error message.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, the return value is a flat ``{'public_ips': [...],
        'private_ips': [...]}`` dict; otherwise a mapping of Linode ID (as a
        string) to such dicts.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')
    ips = ips['DATA']
    ret = {}

    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        # ISPUBLIC == 1 marks a public address in the API response.
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']

        ret = _all_ips

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # linode.list with a LinodeID returns exactly one record.
    result = _query('linode', 'list', args={'LinodeID': linode_id})

    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the VM whose label matches ``name``.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' wins; the legacy 'passwd' key is only the fallback default.
    legacy_value = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_value,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the numeric ``PLANID`` of the matched plan, raising
    ``SaltCloudException`` when the label cannot be mapped.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                # int() raises ValueError (not TypeError) for a non-numeric
                # string such as 'Linode foo'; catch both so a bad size falls
                # through to the mismatch handling below.
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Integer division: true division on Python 3 would yield e.g.
            # 2.0 and build 'Linode 2.0GB', which never matches the API's
            # 'Linode 2GB' labels.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # The previous message incorrectly referenced 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy 'Linode 1024'-style labels to the current API format.
    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested in the profile.
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested in the profile.

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for a VM, if any.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB (default: 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def list_nodes(call=None):
    '''
    Return a brief listing of the linodes on the account.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    # Shared helper does the API call and formatting; full=False trims output.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List the linodes on the account, including all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    # Shared helper does the API call; full=True keeps the raw node record.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Map each Linode label to just its ID and a human-readable state.
    return dict(
        (node['LABEL'], {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        })
        for node in _query('linode', 'list')['DATA']
    )
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    nodes = list_nodes_full()
    return __utils__['cloud.list_nodes_select'](
        nodes, __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # The previous message incorrectly referenced 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until Linode reports the reboot job finished successfully.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    # Assemble the standard salt-cloud node summary from the API record.
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}

    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # The previous message incorrectly referenced 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the API's hourly and monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS == 1 means 'Running' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Wait for the boot job to report success before claiming it is running.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS == 2 means 'Powered Off' (see LINODE_STATUS); nothing to do.
    if node['STATUS'] == 2:
        # Include the 'action' key for consistency with start()'s
        # already-running return value.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to report success before claiming it stopped.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
        Optional; defaults to an empty set of changes.
    '''
    # Guard the documented-optional default: the previous implementation
    # called update_args.update(...) directly and crashed with
    # AttributeError when update_args was left as None.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, attach the raw API record under each node's 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])
        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # get_ips() keys its result by the Linode ID as a string.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the API's ``api_action`` parameter as
        ``<action>.<command>`` (e.g. 'linode.list').

    args
        Query parameters; the API key and api_action are injected here.

    Raises ``SaltCloudSystemExit`` on an expired/invalid key, or
    ``SaltCloudException`` when the API reports any other error(s).
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if another call happened within the same second,
    # sleep for the configured ratelimit_sleep before issuing this one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    # hide_fields keeps the API key and root password out of debug logs.
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return. Returns True on success, False on timeout.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)

    # Poll the job list every `interval` seconds until the job succeeds
    # or the timeout elapses.
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # The newest job comes first; HOST_SUCCESS == 1 marks completion.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode. Returns True once the status is
    reached, False on timeout.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for. Defaults to 'brand_new' (0).

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    # Poll the Linode every `interval` seconds until the target status
    # appears or the timeout elapses.
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable linode status description for a status code.

    status_id
        linode VM status ID (numeric code)
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # NOTE(review): LINODE_STATUS is keyed by status *name* strings, so this
    # numeric-key lookup can never hit and effectively returns None for an
    # unknown code -- confirm whether that fallback is intentional.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric linode status code for an internal status name.

    status_name
        internal linode VM status name
    '''
    status_entry = LINODE_STATUS.get(status_name, {})
    return status_entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Labels must be 3-48 chars, alphanumeric plus '-'/'_', and must start
    # and end with an alphanumeric character.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
list_nodes_min
|
python
|
def list_nodes_min(call=None):
'''
Return a list of the VMs that are on the provider. Only a list of VM names and
their state is returned. This is the minimum amount of information needed to
check for existing VMs.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-linode-config
salt-cloud --function list_nodes_min my-linode-config
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
nodes = _query('linode', 'list')['DATA']
for node in nodes:
name = node['LABEL']
this_node = {
'id': six.text_type(node['LINODEID']),
'state': _get_status_descr_by_id(int(node['STATUS']))
}
ret[name] = this_node
return ret
|
Return a list of the VMs that are on the provider. Only a list of VM names and
their state is returned. This is the minimum amount of information needed to
check for existing VMs.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f list_nodes_min my-linode-config
salt-cloud --function list_nodes_min my-linode-config
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1199-L1231
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _get_status_descr_by_id(status_id):\n '''\n Return linode status by ID\n\n status_id\n linode VM status ID\n '''\n for status_name, status_data in six.iteritems(LINODE_STATUS):\n if status_data['code'] == int(status_id):\n return status_data['descr']\n return LINODE_STATUS.get(status_id, None)\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when the Linode provider is configured.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Requires both 'apikey' and 'password' to be set for the provider;
    returns False otherwise.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    # Key each distribution record by its human-readable label.
    return dict(
        (item['LABEL'], item)
        for item in _query('avail', 'distributions')['DATA']
    )
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    # Key each datacenter record by its location name.
    return dict(
        (item['LOCATION'], item)
        for item in _query('avail', 'datacenters')['DATA']
    )
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # The previous message incorrectly referenced 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}

    # Key each plan record by its human-readable label.
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for human-readable log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared against the *string* '1' here,
        # while other callers int()-convert it before comparing -- confirm
        # the API's actual type for this field.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    source_id = kwargs.get('linode_id', None)
    dc_id = kwargs.get('datacenter_id', None)
    size_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory for the API call.
    if source_id is None or dc_id is None or size_id is None:
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': source_id,
        'DatacenterID': dc_id,
        'PlanID': size_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The VM profile dictionary. Requires ``name`` plus either ``image``
        (fresh deploy) or ``clonefrom`` (clone an existing Linode).

    Returns the bootstrap result dict merged with the new node's details,
    or ``False`` when validation, creation, or cloning fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Bail out early if the name does not satisfy Linode's label rules.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        # Inherit size/plan and location from the clone source when the
        # profile did not specify them.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # The API can also report failures inline in the response body.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait for the node to appear; a timeout here is logged but not fatal.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Clones already carry a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('name', None)
    node_id = kwargs.get('linode_id', None)
    root_id = kwargs.get('root_disk_id', None)
    swap_id = kwargs.get('swap_disk_id', None)
    data_id = kwargs.get('data_disk_id', None)

    kernel = kwargs.get('kernel_id', None)
    if kernel is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel = 138

    if label is None or node_id is None or root_id is None or swap_id is None:
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The API expects a comma-separated list of disk IDs.
    if data_id is not None:
        disk_list = '{0},{1},{2}'.format(root_id, swap_id, data_id)
    else:
        disk_list = '{0},{1}'.format(root_id, swap_id)

    response = _query('linode', 'config.create', args={
        'LinodeID': node_id,
        'KernelID': kernel,
        'Label': label,
        'DiskList': disk_list,
    })

    return _clean_data(response)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    ssh_key = get_pub_key(vm_)
    root_pass = get_password(vm_)

    # A root password is mandatory for disk.createfromdistribution.
    if not root_pass:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    args = {'rootPass': root_pass}
    if ssh_key:
        args['rootSSHKey'] = ssh_key

    args['LinodeID'] = linode_id
    args['DistributionID'] = get_distribution_id(vm_)
    args['Label'] = vm_['name']
    args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    return _clean_data(
        _query('linode', 'disk.createfromdistribution', args=args)
    )
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }

    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks: presumably bypasses the API's pre-delete safety checks
    # (e.g. attached disks) -- confirm against the Linode v3 API docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Keep the salt-cloud minion cache in sync after deletion.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile in the listing is returned;
    # Linodes with multiple profiles always report the first one.
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The swap size, in MB, subtracted from the Linode's total disk to
        compute the default root disk size.

    linode_id
        The ID of the Linode whose total disk (TOTALHD) provides the default.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Profile 'disk_size' wins; otherwise use everything that is not swap.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0

    The data disk receives whatever space is left after the root disk
    (profile 'disk_size', or total minus swap by default) and swap are
    carved out of the Linode's total disk.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM profile to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's ``image`` does not match
    any available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # pprint.pprint() prints to stdout and returns None, which
                # made the original message end in 'None'; pformat() returns
                # the string we actually want embedded here.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for entry in response['DATA']:
        owner = six.text_type(entry['LINODEID'])
        bucket = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        node_ips = ret.setdefault(owner, {'public_ips': [], 'private_ips': []})
        node_ips[bucket].append(entry['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        summary = {'public_ips': [], 'private_ips': []}
        found = ret.get(six.text_type(linode_id))
        if found:
            summary['private_ips'] = found['private_ips']
            summary['public_ips'] = found['public_ips']
        ret = summary

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('name', None)
    node_id = kwargs.get('linode_id', None)

    if label is None and node_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    # Resolving by name costs one extra API call.
    if node_id is None:
        node_id = get_linode_id_from_name(label)

    return _query('linode', 'list', args={'LinodeID': node_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound if no Linode carries the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # Prefer 'password'; fall back to the legacy 'passwd' key.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label; raises
    SaltCloudException when no matching plan exists.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already new-style, but not a known plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings; the
                # previous `except TypeError` could never catch that case.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size / 1024
            if plan_size == int(plan_size):
                # Under Python 3 true division 2048/1024 is 2.0, which would
                # format as 'Linode 2.0GB' and never match a size label.
                # Keep genuinely fractional sizes (e.g. 1.5) intact.
                plan_size = int(plan_size)
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy/paste error: the message previously referred to
        # 'show_instance' instead of this function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels (e.g. 'Linode 2048') and resolve the PLANID.
    label = _decode_linode_plan_label(label)
    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    wants_private = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    # Controlled by the 'allocate_data_disk' profile option.
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not set 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    if vm_size.startswith('Linode'):
        # Strip the prefix from legacy labels such as 'Linode 2048'.
        vm_size = vm_size.replace('Linode ', '')

    # NOTE(review): new-style labels like 'Linode 2GB' leave a non-numeric
    # remainder here, so int() would raise ValueError rather than reaching
    # the SaltCloudNotFound below -- confirm the intended label format.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    # Brief listing only; see list_nodes_full for the raw records.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    # Full listing attaches each node's raw API record under 'extra'.
    return _list_linodes(full=True)
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection is delegated to the shared salt-cloud helper, driven
    # by the 'query.selection' option.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed copy/paste error: the message previously referred to
        # 'show_instance' instead of this action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success, or time out.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    details = get_linode(kwargs={'linode_id': node_id})
    addresses = get_ips(node_id)

    return {
        'id': details['LINODEID'],
        'image': details['DISTRIBUTIONVENDOR'],
        'name': details['LABEL'],
        'size': details['TOTALRAM'],
        'state': _get_status_descr_by_id(int(details['STATUS'])),
        'private_ips': addresses['private_ips'],
        'public_ips': addresses['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed copy/paste error: the message previously referred to
        # 'show_instance' instead of this function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the API's hourly and monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 corresponds to a running machine (see the message below).
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Block until the boot job reports success, or time out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 2:
        # Already powered off. The 'action' key is included for consistency
        # with the other return paths and with start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Block until the shutdown job reports success, or time out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Tolerate the documented default of None (the original crashed with
    # AttributeError), and copy rather than mutate the caller's dict.
    args = {} if update_args is None else dict(update_args)
    args['LinodeID'] = linode_id

    result = _query('linode', 'update', args=args)

    return _clean_data(result)
def _clean_data(api_response):
    '''
    Returns the DATA response from a Linode API query as a single pre-formatted dictionary

    api_response
        The query to be cleaned.
    '''
    # The previous implementation re-ran the identical update when the
    # result was empty, which could never change the outcome; a single
    # update is sufficient.
    data = {}
    data.update(api_response['DATA'])

    return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data

    full
        When True, attach the raw API record for each node under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])
        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']

        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)

        # get_ips() keys its result by the stringified Linode ID.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']

        if full:
            this_node['extra'] = node

        ret[node['LABEL']] = this_node

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subject (e.g. 'linode', 'avail'); combined with ``command``
        into the ``api_action`` request parameter.

    command
        The API method within ``action`` (e.g. 'list', 'boot').

    args
        Extra query parameters; the API key and api_action are injected here.

    Raises SaltCloudSystemExit on an expired/invalid API key, and
    SaltCloudException for any other error the API reports.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Crude rate limiting: if the previous call happened within the same
    # second, sleep for the configured ratelimit_sleep interval.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    # Fatal: no point retrying with a bad key.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the job reports HOST_SUCCESS, False on timeout.
    '''
    interval = 5  # seconds between polls
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # Only the first job in the listing is inspected -- presumably the
        # newest one; confirm against the Linode v3 job.list API docs.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to 'brand_new'.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the Linode reports the requested status, False on
    timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5  # seconds between polls
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # Fallback: LINODE_STATUS is keyed by status *name*, so a lookup by
    # numeric ID will normally yield None -- TODO confirm intent.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric linode status code for an internal status name.

    status_name
        internal linode VM status name

    Returns None when the name is unknown.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when the name is 3-48 characters of ASCII letters,
    numbers, dashes, or underscores, beginning and ending with a letter
    or number; False otherwise (with a warning logged).
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    r'''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.

    vm\_
        The VM profile to read the 'ssh_interface' option from.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
reboot
|
python
|
def reboot(name, call=None):
'''
Reboot a linode.
.. versionadded:: 2015.8.0
name
The name of the VM to reboot.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
'''
if call != 'action':
raise SaltCloudException(
'The show_instance action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
response = _query('linode', 'reboot', args={'LinodeID': node_id})
data = _clean_data(response)
reboot_jid = data['JobID']
if not _wait_for_job(node_id, reboot_jid):
log.error('Reboot failed for %s.', name)
return False
return data
|
Reboot a linode.
.. versionadded:: 2015.8.0
name
The name of the VM to reboot.
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1243-L1272
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n",
"def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):\n '''\n Wait for a Job to return.\n\n linode_id\n The ID of the Linode to wait on. Required.\n\n job_id\n The ID of the job to wait for.\n\n timeout\n The amount of time to wait for a status to update.\n\n quiet\n Log status updates to debug logs when True. Otherwise, logs to info.\n '''\n interval = 5\n iterations = int(timeout / interval)\n\n for i in range(0, iterations):\n jobs_result = _query('linode',\n 'job.list',\n args={'LinodeID': linode_id})['DATA']\n if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:\n return True\n\n time.sleep(interval)\n log.log(\n logging.INFO if not quiet else logging.DEBUG,\n 'Still waiting on Job %s for Linode %s.', job_id, linode_id\n )\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made; used by _query() to apply the
# configured ratelimit_sleep between API calls.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric 'code' returned by the Linode
# API and its human-readable 'descr'. Consumed by the _get_status_* helpers.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# Name under which Salt Cloud loads this driver.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Determine whether this driver should load: only when a Linode
    provider configuration with the required credentials is present.
    '''
    provider = get_configured_provider()
    return False if provider is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, requiring both
    the ``apikey`` and ``password`` settings to be present.
    '''
    required_keys = ('apikey', 'password')
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def avail_images(call=None):
    '''
    Return the Linode images (distributions) available to the account,
    keyed by distribution label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    distributions = _query('avail', 'distributions')['DATA']
    return dict((distro['LABEL'], distro) for distro in distributions)
def avail_locations(call=None):
    '''
    Return the Linode datacenter locations available to the account,
    keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    return dict((dc['LOCATION'], dc) for dc in datacenters)
def avail_sizes(call=None):
    '''
    Return the available Linode sizes (plans), keyed by plan label.

    call
        Set by Salt Cloud; must not be ``action`` since this is a
        provider-level function.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Bug fix: the message previously referenced 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Returns True when the boot job completes, False when it times out.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    # When invoked as a function, the name arrives via kwargs instead of
    # the positional 'name' parameter.
    if call == 'function':
        name = kwargs.get('name', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # Resolve the ID from the name when only a name was given; linode_item
    # is whichever identifier we have, used only for log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone an existing Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    params = dict(
        (key, kwargs.get(key, None))
        for key in ('linode_id', 'datacenter_id', 'plan_id')
    )
    if any(value is None for value in params.values()):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': params['linode_id'],
        'DatacenterID': params['datacenter_id'],
        'PlanID': params['plan_id'],
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\\_
        The VM profile dictionary. Must contain at least ``name`` and,
        unless cloning, ``image``; ``size``, ``location`` and ``clonefrom``
        are optional.

    Returns the bootstrap result dict merged with the new node's data, or
    False when validation or the API request fails.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    # Resolve the plan (size) from the profile, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    # Resolve the datacenter from the profile, if given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Cloning path: inherit size/plan/datacenter from the source Linode
        # for any value not explicitly set in the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait for the node to reach the 'Brand New' status before configuring it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Create a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': ','.join('{0}'.format(disk) for disk in disk_ids)
                   }
    return _clean_data(_query('linode', 'config.create', args=config_args))
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from the configured distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; used to compute the root disk size.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    args = {
        'rootPass': root_password,
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
    }
    pub_key = get_pub_key(vm_)
    if pub_key:
        args['rootSSHKey'] = pub_key
    return _clean_data(_query('linode', 'disk.createfromdistribution', args=args))
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; falls back to the profile's ``swap``
        setting when not supplied.
    '''
    size = swap_size if swap_size else get_swap_size(vm_)
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': size,
    })
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the Linode (type is hardcoded to ext4 at the moment).

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    })
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Request a new private IP address for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroy a Linode by name.

    name
        The name of VM to be destroyed.

    Fires 'destroying'/'destroyed' events around the API call and removes
    the minion cache entry when ``update_cachedir`` is enabled.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks deletes the Linode even when it still has attached disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Returns the first configuration profile attached to the Linode.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode DatacenterID for a datacenter.

    location
        The location, or name, of the datacenter to get the ID from.
        Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB: the profile's ``disk_size``
    setting, or the Linode's total disk minus the swap size.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: total disk minus the root
    disk and swap allocations.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for the image configured on a VM profile.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's ``image`` label does not
    match any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # Bug fix: pprint.pprint() prints to stdout and returns None, which made
    # the message end with 'None'; pformat() returns the formatted string.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses grouped per Linode.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, only a flat ``{'public_ips': [...], 'private_ips': [...]}``
        mapping for that Linode is returned instead of the per-node dict.
    '''
    query_args = {'LinodeID': linode_id} if linode_id else None
    ip_data = _query('linode', 'ip.list', args=query_args)['DATA']
    ret = {}
    for entry in ip_data:
        node_key = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_key, {'public_ips': [], 'private_ips': []})
        ip_kind = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[ip_kind].append(entry['IPADDRESS'])
    # If linode_id was specified, only return the ips, and not the
    # dictionary keyed by Linode ID.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        matching = ret.get(six.text_type(linode_id))
        if matching:
            flat['private_ips'] = matching['private_ips']
            flat['public_ips'] = matching['public_ips']
        ret = flat
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        name = kwargs.get('name', None)
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)
    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID of the Linode whose label matches ``name``.

    name
        The name (label) of the Linode to look up. Required.

    Raises SaltCloudNotFound when no Linode has the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the root password configured for a VM, preferring ``password``
    and falling back to the legacy ``passwd`` setting.

    vm\_
        The configuration to obtain the password from.
    '''
    legacy_password = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_password,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used in Linode API output, returning the matching PLANID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # Bug fix: int() on a non-numeric string raises ValueError,
                # not TypeError, so this fallback used to be unreachable and
                # bad labels crashed instead of being logged.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Return the Linode Plan ID for a plan label.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Bug fix: the message previously referenced 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    label = _decode_linode_plan_label(label)
    return label
def get_private_ip(vm_):
    '''
    Return True when the profile requests a private IP address
    (``assign_private_ip``); defaults to False.
    '''
    assign = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return assign
def get_data_disk(vm_):
    '''
    Return True when the profile requests a data disk
    (``allocate_data_disk``); defaults to False.

    .. versionadded:: 2016.3.0
    '''
    allocate = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return allocate
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM (``ssh_pubkey``).

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB; defaults to 128.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM in MB).

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the numeric part of the size label does
    not match the plan's RAM.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy labels look like 'Linode 1024'; strip the prefix so the numeric
    # part can be compared against the plan's RAM.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): if the label does not start with 'Linode', int(vm_size)
    # may raise ValueError for labels such as 'Nanode 1GB' -- confirm that
    # callers only reach this with legacy-format sizes.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a brief listing of all Linodes.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    brief = _list_linodes(full=False)
    return brief
def list_nodes_full(call=None):
    '''
    List all Linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    detailed = _list_linodes(full=True)
    return detailed
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, limited to the
    fields configured via ``query.selection``.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    return {
        'id': node['LINODEID'],
        'image': node['DISTRIBUTIONVENDOR'],
        'name': node['LABEL'],
        'size': node['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    profile
        The name of the Salt Cloud profile to estimate pricing for.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the message previously referenced 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive day/week/year estimates from the hourly and monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running (see LINODE_STATUS)
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to finish before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 == 'Powered Off' (see LINODE_STATUS); nothing to do then.
    if node['STATUS'] == 2:
        return {'success': True,
                # Added 'action' key for consistency with start() and with
                # the other return paths of this function.
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job finishes (or times out) before reporting.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard against the documented default: calling with update_args=None
    # previously raised AttributeError on the .update() call below.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that fetches all Linodes and formats them into the salt-cloud
    node-listing structure, keyed by label.
    '''
    all_ips = get_ips()
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach the IP lists gathered for this Linode, when present.
        ip_info = all_ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The Linode API subject area (e.g. ``linode``, ``avail``).

    command
        The API command within that area (e.g. ``list``, ``boot``).

    args
        Query parameters to send; ``api_key`` and ``api_action`` are
        injected automatically when not already present.

    method
        HTTP method to use. Defaults to ``GET``.

    header_dict
        Extra HTTP headers to send.

    data
        Request body, if any.

    url
        Base URL of the Linode API endpoint.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Crude rate limiting: if another query already happened within this
    # same second, sleep for the configured 'ratelimit_sleep' interval.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # The Linode API reports failures in an ERRORARRAY list rather than via
    # HTTP status codes; surface those as salt-cloud exceptions here.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job finishes, or until the timeout elapses.

    linode_id
        The ID of the Linode the job is running against. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait, in seconds. Polled every 5 seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # NOTE(review): only the first entry of job.list is checked, which
        # assumes our job is the most recent one — confirm API ordering.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    # Timed out without seeing the job succeed.
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the machine reaches a given status, or time out.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait, in seconds. Polled every 5 seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    # Timed out without reaching the requested status.
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a status code.

    status_id
        linode VM status ID
    '''
    wanted = int(status_id)
    for entry in six.itervalues(LINODE_STATUS):
        if entry['code'] == wanted:
            return entry['descr']
    # No code matched; fall back to a direct key lookup (None when absent).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    # None when the name is unknown or carries no code.
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    # Labels must be 3-48 characters long and match the pattern above.
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Profile/provider-level setting only; deliberately not searched globally.
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
show_instance
|
python
|
def show_instance(name, call=None):
'''
Displays details about a particular Linode VM. Either a name or a linode_id must
be provided.
.. versionadded:: 2015.8.0
name
The name of the VM for which to display details.
CLI Example:
.. code-block:: bash
salt-cloud -a show_instance vm_name
.. note::
The ``image`` label only displays information about the VM's distribution vendor,
such as "Debian" or "RHEL" and does not display the actual image name. This is
due to a limitation of the Linode API.
'''
if call != 'action':
raise SaltCloudException(
'The show_instance action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
node_data = get_linode(kwargs={'linode_id': node_id})
ips = get_ips(node_id)
state = int(node_data['STATUS'])
ret = {'id': node_data['LINODEID'],
'image': node_data['DISTRIBUTIONVENDOR'],
'name': node_data['LABEL'],
'size': node_data['TOTALRAM'],
'state': _get_status_descr_by_id(state),
'private_ips': ips['private_ips'],
'public_ips': ips['public_ips']}
return ret
|
Displays details about a particular Linode VM. Either a name or a linode_id must
be provided.
.. versionadded:: 2015.8.0
name
The name of the VM for which to display details.
CLI Example:
.. code-block:: bash
salt-cloud -a show_instance vm_name
.. note::
The ``image`` label only displays information about the VM's distribution vendor,
such as "Debian" or "RHEL" and does not display the actual image name. This is
due to a limitation of the Linode API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1275-L1315
|
[
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n",
"def get_ips(linode_id=None):\n '''\n Returns public and private IP addresses.\n\n linode_id\n Limits the IP addresses returned to the specified Linode ID.\n '''\n if linode_id:\n ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})\n else:\n ips = _query('linode', 'ip.list')\n\n ips = ips['DATA']\n ret = {}\n\n for item in ips:\n node_id = six.text_type(item['LINODEID'])\n if item['ISPUBLIC'] == 1:\n key = 'public_ips'\n else:\n key = 'private_ips'\n\n if ret.get(node_id) is None:\n ret.update({node_id: {'public_ips': [], 'private_ips': []}})\n ret[node_id][key].append(item['IPADDRESS'])\n\n # If linode_id was specified, only return the ips, and not the\n # dictionary based on the linode ID as a key.\n if linode_id:\n _all_ips = {'public_ips': [], 'private_ips': []}\n matching_id = ret.get(six.text_type(linode_id))\n if matching_id:\n _all_ips['private_ips'] = matching_id['private_ips']\n _all_ips['public_ips'] = matching_id['public_ips']\n\n ret = _all_ips\n\n return ret\n",
"def _get_status_descr_by_id(status_id):\n '''\n Return linode status by ID\n\n status_id\n linode VM status ID\n '''\n for status_name, status_data in six.iteritems(LINODE_STATUS):\n if status_data['code'] == int(status_id):\n return status_data['descr']\n return LINODE_STATUS.get(status_id, None)\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields, indexed by internal name. Each entry maps an
# internal name to the numeric Linode API status code and its description
# (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE: the misspelled key 'beeing_created' is kept as-is; entries are
    # looked up by name (_get_status_id_by_name), so renaming it here alone
    # would be a behavior change.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
# Name under which this cloud driver registers with salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Register the driver only when a Linode provider is configured.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # A provider entry must supply both 'apikey' and 'password' to be
    # considered configured.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    distros = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its label.
    return dict((item['LABEL'], item) for item in distros)
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its location name.
    return dict((item['LOCATION'], item) for item in datacenters)
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: this message previously referenced 'avail_locations'.
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Setting
        ``check_running`` to False is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # Prefer the ID; resolving by name costs an extra API call.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # Fixed: STATUS is a numeric code (1 == Running, see LINODE_STATUS)
        # but was previously compared against the string '1', which never
        # matched for integer responses. int() accepts either form.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three IDs are mandatory for the clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dictionary describing the machine to create. Must
        contain at least 'name'; 'size', 'location', 'image' and
        'clonefrom' drive how the machine is provisioned.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    # Resolve the plan (size) ID from the profile, when a size was given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    # Resolve the datacenter ID from the profile's location, when given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: plan and datacenter default to the source Linode's
        # values when the profile does not override them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # The API can also report failure via ERRORARRAY in the payload.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )
    # DiskList is a comma-separated list of disk IDs; the data disk is
    # appended only when one was supplied.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The swap partition size, in MB, used when computing the root disk
        size. Defaults to the profile's configured swap size.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory for disk.createfromdistribution.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    # The root disk gets whatever space remains after subtracting swap
    # (see get_disk_size).
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    # Fall back to the profile-configured swap size when none is given.
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name']+"_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Create a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True presumably bypasses the API's pre-delete safety checks
    # (e.g. attached disks) — confirm against the Linode API docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache, when caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # A Linode may have multiple configs; only the first one is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a given location name.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # Raises KeyError for an unknown location; create() relies on that.
    datacenter = avail_locations()[location]
    return datacenter['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The swap size, in MB, subtracted from the Linode's total disk when
        computing the default.

    linode_id
        The ID of the Linode whose total disk size provides the default.
    '''
    # Default root disk size is the Linode's total disk minus swap, unless
    # the profile overrides 'disk_size'.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    # Whatever remains after the root disk and swap goes to the data disk.
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # No match: include the available distributions in the error message.
    # Fixed: pprint.pprint() prints to stdout and returns None, which put the
    # literal string 'None' into the exception text; pformat() returns the
    # formatted string instead.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses, grouped per Linode.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_data = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_data = _query('linode', 'ip.list')['DATA']
    ret = {}
    for item in ip_data:
        node_id = six.text_type(item['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if item['ISPUBLIC'] == 1:
            bucket['public_ips'].append(item['IPADDRESS'])
        else:
            bucket['private_ips'].append(item['IPADDRESS'])
    # When a single Linode was requested, return just its IP lists instead
    # of the dictionary keyed by Linode ID.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flat['private_ips'] = match['private_ips']
            flat['public_ips'] = match['public_ips']
        ret = flat
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        of ``linode_id``, at the cost of an extra API call.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    # Resolve the name to an ID when only a name was given.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for the Linode labeled ``name``.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    Looks up ``password`` first and falls back to the legacy ``passwd`` key.

    vm\_
        The configuration to obtain the password from.
    '''
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example: ``Linode 2048`` decodes to ``Linode 2GB``.

    Raises ``SaltCloudException`` when the label cannot be mapped to a
    known plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # Bug fix: int() raises ValueError (not TypeError) on a
                # non-numeric string, so malformed sizes previously escaped
                # this handler and propagated an unhandled exception.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Bug fix: use floor division; under Python 3 true division
            # produced labels like 'Linode 2.0GB' which never match the
            # API's 'Linode 2GB' style labels.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Bug fix: the original message referenced 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True when the profile requests that a private IP address be
    assigned.
    '''
    assign = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return assign
def get_data_disk(vm_):
    '''
    Return True when the profile requests a data disk.

    .. versionadded:: 2016.3.0
    '''
    wants_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_disk
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to allocate, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap = config.get_cloud_config_value('swap', vm_, __opts__, default=128)
    return swap
def get_vm_size(vm_):
    r'''
    Return the VM's size (RAM, in MB) for its configured ``size`` label.

    vm\_
        The VM to get the size for.

    Raises ``SaltCloudNotFound`` when the label's RAM value does not match
    the label's numeric portion.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Labels look like 'Linode 2048'; strip the prefix to get the number.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Return a brief listing of the Linodes on this provider.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    # Delegate to the shared helper without the raw API payload attached.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List Linodes with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    # Delegate to the shared helper, attaching the raw API payload.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a minimal listing of the VMs on the provider: only each VM's
    name, ID, and state. This is the least information needed to check
    for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs on the provider, restricted to the fields
    configured under ``query.selection``.
    '''
    nodes = list_nodes_full()
    fields = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](nodes, fields, call)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Bug fix: the original message referenced 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the original message referenced 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the API's hourly/monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Nothing to do when the Linode is already up (status code 1).
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    boot_response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    started = _wait_for_job(node_id, boot_response['JobID'])

    if started:
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    return {'action': 'start',
            'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Nothing to do when the Linode is already down (status code 2).
    if node['STATUS'] == 2:
        # Consistency fix: include the 'action' key, matching the
        # equivalent early return in start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Bug fix: the documented default of None previously crashed with
    # AttributeError on .update(); treat None as "no extra args".
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that formats raw Linode API records into salt-cloud node dicts.

    full
        When True, attach the raw API record under the 'extra' key.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        ip_info = all_ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API "subject" (e.g. ``linode`` or ``avail``); combined with
        ``command`` into the ``api_action`` query parameter.

    command
        The API method under ``action`` (e.g. ``list``, ``boot``).

    args
        Query parameters to send; the API key and ``api_action`` are
        injected automatically when absent.

    method
        HTTP method to use.

    header_dict
        Extra HTTP headers to send.

    data
        Request body payload, if any.

    url
        Base URL of the Linode API endpoint.

    Raises ``SaltCloudSystemExit`` on authentication failure and
    ``SaltCloudException`` for any other API-reported error.
    '''
    # LASTCALL holds the epoch of the previous API call; used below to
    # throttle back-to-back requests.
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Every request is authenticated with the provider's API key.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    # DELETE responses are not JSON-decoded.
    decode = True
    if method == 'DELETE':
        decode = False

    # Throttle: when a call was already made this second (or the clock
    # moved backwards), sleep for the configured interval first.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Never log credentials or root passwords.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Retrying cannot help a bad key; bail out hard.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports host success, or time out.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True; otherwise at info.
    '''
    interval = 5
    for _ in range(int(timeout / interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']

        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the VM reports the requested status code, or time out.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to 'brand_new'.

    timeout
        The maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True; otherwise at info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    waiting_for = _get_status_descr_by_id(status)
    interval = 5

    for _ in range(int(timeout / interval)):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        current = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current, waiting_for
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        Numeric linode VM status ID.

    Returns None when the code is unknown.
    '''
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # Bug fix (dead code): the previous fallback looked the numeric code
    # up against the name-keyed LINODE_STATUS mapping, which can never
    # match — it always yielded None. Return None explicitly.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name,
    or None when the name is unknown.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether ``name`` fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate.
    '''
    name = six.text_type(name)
    regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Labels must be 3-48 characters and match the allowed pattern.
    valid = 3 <= len(name) <= 48 and re.match(regex, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to connect over: 'public_ips' (the
    default) or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
show_pricing
|
python
|
def show_pricing(kwargs=None, call=None):
'''
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
'''
if call != 'function':
raise SaltCloudException(
'The show_instance action must be called with -f or --function.'
)
profile = __opts__['profiles'].get(kwargs['profile'], {})
if not profile:
raise SaltCloudNotFound(
'The requested profile was not found.'
)
# Make sure the profile belongs to Linode
provider = profile.get('provider', '0:0')
comps = provider.split(':')
if len(comps) < 2 or comps[1] != 'linode':
raise SaltCloudException(
'The requested profile does not belong to Linode.'
)
plan_id = get_plan_id(kwargs={'label': profile['size']})
response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
ret = {}
ret['per_hour'] = response['HOURLY']
ret['per_day'] = ret['per_hour'] * 24
ret['per_week'] = ret['per_day'] * 7
ret['per_month'] = response['PRICE']
ret['per_year'] = ret['per_month'] * 12
return {profile['profile']: ret}
|
Show pricing for a particular profile. This is only an estimate, based on
unofficial pricing sources.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1318-L1360
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_plan_id(kwargs=None, call=None):\n '''\n Returns the Linode Plan ID.\n\n label\n The label, or name, of the plan to get the ID from.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_plan_id linode label=\"Linode 1024\"\n '''\n if call == 'action':\n raise SaltCloudException(\n 'The show_instance action must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n label = kwargs.get('label', None)\n if label is None:\n raise SaltCloudException(\n 'The get_plan_id function requires a \\'label\\'.'\n )\n\n label = _decode_linode_plan_label(label)\n\n return label\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made; read/written by _query()
# to rate-limit successive API calls.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric 'code' the API reports and
# a human-readable 'descr'.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): key is misspelled ('beeing') but is an internal lookup
    # name — do not rename without auditing callers.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# The name salt-cloud uses to refer to this driver.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when Linode provider configs are present.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its label.
    return dict((item['LABEL'], item) for item in distributions)
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its location string.
    return dict((item['LOCATION'], item) for item in datacenters)
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Bug fix: the original message referenced 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        ret[item['LABEL']] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # When invoked as a function, the name arrives in kwargs rather than
    # as the positional argument.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared against the string '1' here,
        # while other code in this module treats STATUS as an int
        # (e.g. int(node['STATUS'])) — confirm whether this guard can
        # ever fire before relying on it.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The profile/VM configuration dict supplied by salt-cloud; must at
        least contain 'name', and either 'image' or 'clonefrom'.

    Returns the bootstrap result dict on success, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) if one was configured.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the datacenter if a location was configured.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # when the profile did not set them explicitly.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # API-level errors are reported inline rather than as exceptions.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait until the new Linode reports the 'brand_new' status before
    # configuring it; log (but do not abort) on timeout.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Clones already carry a config; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    # 138 appears to always be the latest 64-bit kernel for Linux
    if kernel_id is None:
        kernel_id = 138

    # All of these must be supplied; the data disk is optional.
    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': ','.join(six.text_type(disk) for disk in disk_ids)
                   }

    return _clean_data(_query('linode', 'config.create', args=config_args))
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the chosen distribution image.

    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for the Linode API; the SSH key is not.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'rootPass': root_password,
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
    }
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    return _clean_data(_query('linode', 'disk.createfromdistribution', args=disk_args))
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.
    linode_id
        The ID of the Linode to create the swap disk for.
    swap_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        # Fall back to the profile's configured swap size when none given.
        'Size': swap_size if swap_size else get_swap_size(vm_),
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.
    name
        The name of VM to be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Announce the impending destroy on the Salt event bus before touching
    # the API, so reactors can observe the attempt even if it fails.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks tells Linode to delete even if disks are still attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion's cached metadata so stale entries don't linger.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    # Resolving by name costs one extra API call compared to linode_id.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    # Only the first configuration profile is returned; Linodes created by
    # this driver have exactly one.
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to everything left on the Linode after reserving swap space.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is not consumed by the root disk or swap becomes the data disk.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # No label matched. Use pprint.pformat here: pprint.pprint() prints to
    # stdout and returns None, which previously rendered "available:\nNone"
    # in the exception message instead of the list of distributions.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        # Create the per-node bucket on first sight of this Linode ID.
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        if entry['ISPUBLIC'] == 1:
            bucket['public_ips'].append(entry['IPADDRESS'])
        else:
            bucket['private_ips'].append(entry['IPADDRESS'])

    # When a single Linode was requested, flatten the result to just that
    # node's IP lists instead of a mapping keyed by Linode ID.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flat['private_ips'] = node_ips['private_ips']
            flat['public_ips'] = node_ips['public_ips']
        return flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if linode_id is None and name is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    # Resolving by name costs an extra API round-trip.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    # Linode labels are unique per account, so the first match wins.
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is the legacy key; it is only consulted when 'password'
    # is not set anywhere in the profile/provider config.
    legacy = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<type> <n>GB" format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # BUG FIX: int() raises ValueError (not TypeError) for a
                # non-numeric string, so the fallback was previously
                # unreachable and the ValueError escaped to the caller.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # BUG FIX: use floor division; true division on Python 3 would
            # produce e.g. "Linode 2.0GB", which never matches an API label.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUG FIX: the message previously referred to 'show_instance' and
        # called this a function action ("-f or --function" is correct here,
        # but the name was a copy-paste error).
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy "Linode 1024"-style labels before the lookup.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # Private IPs are opt-in via the 'assign_private_ip' profile key.
    wants_private = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    # Data disks are opt-in via the 'allocate_data_disk' profile key.
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # 128 MB is the Linode default swap allocation.
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Returns the VM's size.
    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy labels look like 'Linode 1024' where the trailing number is the
    # RAM in MB; strip the prefix so it can be compared with the plan's RAM.
    # NOTE(review): a label like 'Linode 2GB' would make int() below raise
    # ValueError -- confirm which label format callers supply here.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: omit the raw API record that full=True would include.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing: each entry carries the raw API record under 'extra'.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    # Only the name, ID, and state are kept to minimise the payload.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Delegate filtering of fields to the shared cloud utility.
    nodes = list_nodes_full()
    return __utils__['cloud.list_nodes_select'](
        nodes, __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUG FIX: the message previously named 'show_instance' (copy-paste).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success, or the poll times out.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def start(name, call=None):
    '''
    Start a VM in Linode.
    name
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running (see LINODE_STATUS); nothing to do in that case.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Poll the boot job until it reports HOST_SUCCESS, or time out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == Powered Off (see LINODE_STATUS); nothing to do.
    # CONSISTENCY FIX: include the 'action' key, matching the start() return
    # shape, which was previously missing from this branch.
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Poll the shutdown job until it reports HOST_SUCCESS, or time out.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUG FIX: guard against the documented default of None, which would
    # previously raise AttributeError on update_args.update(...).
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # Attach IP lists when this Linode appears in the ip.list result.
        ip_info = all_ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']

        # Full listings also carry the raw API record.
        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.
    action
        The Linode API namespace, e.g. ``linode`` or ``avail``.
    command
        The API method within that namespace, e.g. ``list``.
    args
        Query parameters; ``api_key`` and ``api_action`` are filled in here.
    method
        HTTP method; DELETE responses are not JSON-decoded.
    Returns the decoded JSON response dict; raises SaltCloudSystemExit on an
    expired/invalid API key and SaltCloudException on any other API error.
    '''
    # LASTCALL is module-level shared state used for crude rate limiting.
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    # Inject credentials and the API action without clobbering caller values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # If the previous call happened within the same second, sleep for the
    # configured ratelimit_sleep to avoid hammering the API.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of Salt's debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials abort immediately.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        job_list = _query('linode',
                          'job.list',
                          args={'LinodeID': linode_id})['DATA']
        # The most recent job is first; succeed once it reports host success.
        if job_list[0]['JOBID'] == job_id and job_list[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status to look for to update.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, _get_status_descr_by_id(node['STATUS']), status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # DEAD-CODE FIX: the old fallback, LINODE_STATUS.get(status_id), could
    # never match because the dict is keyed by status *name* (a string),
    # not by numeric code -- it always produced None. Return None explicitly.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    # Unknown names fall through to None rather than raising.
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    # 3-48 characters; alphanumeric at both ends; dashes/underscores allowed
    # in between.
    valid = (
        3 <= len(name) <= 48
        and re.match(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$', name) is not None
    )
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
start
|
python
|
def start(name, call=None):
'''
Start a VM in Linode.
name
The name of the VM to start.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
if call != 'action':
raise SaltCloudException(
'The start action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
node = get_linode(kwargs={'linode_id': node_id})
if node['STATUS'] == 1:
return {'success': True,
'action': 'start',
'state': 'Running',
'msg': 'Machine already running'}
response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
if _wait_for_job(node_id, response['JobID']):
return {'state': 'Running',
'action': 'start',
'success': True}
else:
return {'action': 'start',
'success': False}
|
Start a VM in Linode.
name
The name of the VM to start.
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1363-L1398
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_linode_id_from_name(name):\n '''\n Returns the Linode ID for a VM from the provided name.\n\n name\n The name of the Linode from which to get the Linode ID. Required.\n '''\n nodes = _query('linode', 'list')['DATA']\n\n linode_id = ''\n for node in nodes:\n if name == node['LABEL']:\n linode_id = node['LINODEID']\n return linode_id\n\n if not linode_id:\n raise SaltCloudNotFound(\n 'The specified name, {0}, could not be found.'.format(name)\n )\n",
"def get_linode(kwargs=None, call=None):\n '''\n Returns data for a single named Linode.\n\n name\n The name of the Linode for which to get data. Can be used instead\n ``linode_id``. Note this will induce an additional API call\n compared to using ``linode_id``.\n\n linode_id\n The ID of the Linode for which to get data. Can be used instead of\n ``name``.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt-cloud -f get_linode my-linode-config name=my-instance\n salt-cloud -f get_linode my-linode-config linode_id=1234567\n '''\n if call == 'action':\n raise SaltCloudSystemExit(\n 'The get_linode function must be called with -f or --function.'\n )\n\n if kwargs is None:\n kwargs = {}\n\n name = kwargs.get('name', None)\n linode_id = kwargs.get('linode_id', None)\n if name is None and linode_id is None:\n raise SaltCloudSystemExit(\n 'The get_linode function requires either a \\'name\\' or a \\'linode_id\\'.'\n )\n\n if linode_id is None:\n linode_id = get_linode_id_from_name(name)\n\n result = _query('linode', 'list', args={'LinodeID': linode_id})\n\n return result['DATA'][0]\n",
"def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):\n '''\n Wait for a Job to return.\n\n linode_id\n The ID of the Linode to wait on. Required.\n\n job_id\n The ID of the job to wait for.\n\n timeout\n The amount of time to wait for a status to update.\n\n quiet\n Log status updates to debug logs when True. Otherwise, logs to info.\n '''\n interval = 5\n iterations = int(timeout / interval)\n\n for i in range(0, iterations):\n jobs_result = _query('linode',\n 'job.list',\n args={'LinodeID': linode_id})['DATA']\n if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:\n return True\n\n time.sleep(interval)\n log.log(\n logging.INFO if not quiet else logging.DEBUG,\n 'Still waiting on Job %s for Linode %s.', job_id, linode_id\n )\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE: the misspelled key 'beeing_created' is intentional/legacy; it is
    # looked up by name via _get_status_id_by_name, so renaming it would
    # change behavior for any caller using the existing spelling.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Load this driver only when a Linode provider is fully configured.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # Both 'apikey' and 'password' must be present for the provider to be
    # considered configured (see the module docstring).
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    # Key each distribution record by its human-readable label.
    response = _query('avail', 'distributions')
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    datacenters = _query('avail', 'datacenters')['DATA']
    return {entry['LOCATION']: entry for entry in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Bug fix: the original message incorrectly referenced avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10

    Returns True on success, False if the boot job did not complete in time.
    '''
    # Action invocations get the target name positionally; function
    # invocations must pass it via kwargs (handled below).
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # linode_item is only used for log/error messages; prefer the name when
    # we have one, otherwise fall back to the numeric ID.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): this compares against the string '1', while other
        # functions in this module call int(...) on STATUS before comparing.
        # If the API returns an integer here this guard never fires — verify
        # against the linode.list response type.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the boot job reports success (or times out).
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory for the linode.clone API call.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    Provisions the Linode (either from scratch or by cloning an existing
    Linode named in the ``clonefrom`` profile key), assigns IPs, creates
    disks and a configuration profile, boots the machine, and finally
    bootstraps it as a Salt minion. Returns the bootstrap result dict
    merged with the new node's details, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Linode labels have strict character/length rules; bail out early.
    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the requested plan (size) to a Linode PlanID, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/plan/location from the clone source when the profile
        # does not override them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Wait until the API reports the node exists before configuring it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already has disks and a config profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    # check_running=False: the freshly created node cannot be running yet.
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(item is None for item in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # Assemble the ordered disk list; the optional data disk goes last.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB, used to compute the root disk size.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for disk.createfromdistribution.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'rootPass': root_password,
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
    }
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    })
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size
    }
    result = _query('linode', 'disk.create', args=disk_args)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate',
                    args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name

    Returns the raw linode.delete API response.
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Fire the "destroying" event before issuing the API call so reactors
    # see it even if the delete fails.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True lets the API delete the Linode even if it still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache so queries stay accurate.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile is considered.
    configs = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    return {'config_id': configs[0]['ConfigID']}
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        The swap size in MB to subtract from the Linode's total disk space.

    linode_id
        The ID of the Linode whose total disk space is queried.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to all remaining space after the swap allocation.
    default_root = total_hd - swap
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=default_root
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever space remains on the
    Linode after the root and swap disks are accounted for.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's image label matches no
    available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # Bug fix: pprint.pprint() prints to stdout and returns None,
                # which made the message embed the literal string 'None'.
                # pformat() returns the formatted listing for embedding.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, the return value is a flat ``{'public_ips': [...],
        'private_ips': [...]}`` dict; otherwise the dict is keyed by
        Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    # Group every address by its owning Linode, split public/private.
    by_node = {}
    for record in response['DATA']:
        owner = six.text_type(record['LINODEID'])
        bucket = by_node.setdefault(owner, {'public_ips': [], 'private_ips': []})
        field = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        bucket[field].append(record['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        found = by_node.get(six.text_type(linode_id))
        if found:
            flattened['private_ips'] = found['private_ips']
            flattened['public_ips'] = found['public_ips']
        return flattened

    return by_node
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # linode.list with a LinodeID returns a one-element DATA list.
    listing = _query('linode', 'list', args={'LinodeID': linode_id})
    return listing['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode carries the given label.
    '''
    nodes = _query('linode', 'list')['DATA']

    for node in nodes:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from. Checks ``password``
        first, then falls back to the legacy ``passwd`` key.
    '''
    legacy_password = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_password,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the decoded label. Raises SaltCloudException
    when the label cannot be mapped to an available plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            # Bug fix: int() on a non-numeric *string* raises ValueError, not
            # TypeError, so the original handler never caught bad input like
            # 'Linode abc'. Catch both to be safe.
            except (TypeError, ValueError):
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # Linode renamed the 1GB plan to 'Nanode 1GB'.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Bug fix: the original message referenced show_instance.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested.
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested.

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for a VM.

    vm\_
        The configuration to obtain the public key from.
    '''
    ssh_pubkey = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return ssh_pubkey
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM, in MB).

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the plan label's numeric part does not
    match the plan's RAM.
    '''
    requested = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[requested]['RAM']

    # Strip the 'Linode ' prefix so the remainder is the numeric size.
    numeric_part = requested
    if numeric_part.startswith('Linode'):
        numeric_part = numeric_part.replace('Linode ', '')

    if ram == int(numeric_part):
        return ram
    raise SaltCloudNotFound(
        'The specified size, {0}, could not be found.'.format(numeric_part)
    )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared formatter with the brief view.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared formatter with the full view.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    nodes = _query('linode', 'list')['DATA']
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.

    Delegates to the shared salt-cloud helper, filtering the full node
    listing down to the fields named in the ``query.selection`` option.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name

    Returns the cleaned API response on success, False if the reboot job
    did not complete in time.
    '''
    if call != 'action':
        # Bug fix: the original message referenced show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    status_code = int(node_data['STATUS'])

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(status_code),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Bug fix: the original message referenced show_instance.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive the longer periods from the API's hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 2 is 'Powered Off' — nothing to do.
    if node['STATUS'] == 2:
        return {'success': True,
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    shutdown_succeeded = _wait_for_job(node_id, response['JobID'])

    if shutdown_succeeded:
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    return {'action': 'stop',
            'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
        Optional; when omitted only the LinodeID is sent.
    '''
    # Bug fix: the original default of None crashed with AttributeError on
    # update_args.update(...) when no args were supplied.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    full
        When True, attach the raw API record under the ``extra`` key.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    ret = {}
    for node in nodes:
        node_key = six.text_type(node['LINODEID'])
        entry = {
            'id': node_key,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # Attach this node's addresses, if the IP listing knows about it.
        addresses = all_ips.get(node_key)
        if addresses is not None:
            entry['private_ips'] = addresses['private_ips']
            entry['public_ips'] = addresses['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action/command
        Combined into the ``api_action`` query parameter as
        ``<action>.<command>`` (e.g. 'linode.list').

    args
        Extra query parameters; the API key and api_action are injected
        here when not already present.

    Returns the decoded JSON response dict. Raises SaltCloudSystemExit on
    authentication failure and SaltCloudException on any other API-reported
    error.
    '''
    # LASTCALL is module-global state used for client-side rate limiting.
    global LASTCALL

    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials and the API action without clobbering caller values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    # DELETE responses carry no JSON body worth decoding.
    if method == 'DELETE':
        decode = False

    # If a call happened within the same second as the previous one, sleep
    # for ratelimit_sleep seconds to stay under the API's rate limit.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep credentials and passwords out of debug logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the job reports host success, False on timeout.
    '''
    # Poll every 5 seconds until the timeout is exhausted.
    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # job.list returns newest-first; only the first entry is checked.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The numeric status code to look for. Defaults to the 'brand_new'
        status code.

    timeout
        The amount of time to wait for the status to appear, in seconds.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    # Human-readable name of the status being waited for, used in log lines.
    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    # Timed out without ever observing the requested status.
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a status code.

    status_id
        Numeric Linode VM status code (see LINODE_STATUS).

    Returns None when the code matches no known status.
    '''
    for status_data in LINODE_STATUS.values():
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # The previous fallback, ``LINODE_STATUS.get(status_id, None)``, could
    # never match here: LINODE_STATUS is keyed by status *names*, not codes,
    # so for any numeric status_id it always produced None. Return None
    # explicitly instead of implying a second lookup path.
    return None
def _get_status_id_by_name(status_name):
    '''
    Look up the numeric Linode status code for an internal status name.

    status_name
        Internal Linode VM status name (a key of LINODE_STATUS).

    Returns None for unknown names.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when valid, otherwise logs a warning and returns False.
    '''
    label = six.text_type(name)
    # Labels must start and end with an alphanumeric character; dashes and
    # underscores are allowed in between.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface to connect to over SSH: 'public_ips' (the
    default) or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
update_linode
|
python
|
def update_linode(linode_id, update_args=None):
'''
Updates a Linode's properties.
linode_id
The ID of the Linode to shutdown. Required.
update_args
The args to update the Linode with. Must be in dictionary form.
'''
update_args.update({'LinodeID': linode_id})
result = _query('linode', 'update', args=update_args)
return _clean_data(result)
|
Updates a Linode's properties.
linode_id
The ID of the Linode to shutdown. Required.
update_args
The args to update the Linode with. Must be in dictionary form.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1438-L1452
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def _clean_data(api_response):\n '''\n Returns the DATA response from a Linode API query as a single pre-formatted dictionary\n\n api_response\n The query to be cleaned.\n '''\n data = {}\n data.update(api_response['DATA'])\n\n if not data:\n response_data = api_response['DATA']\n data.update(response_data)\n\n return data\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)

# The epoch of the last time a query was made (used by _query to rate-limit).
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))

# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric code returned by the Linode API
# ('code') and a display string ('descr').
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): key is misspelled ('beeing_created'); it is a lookup key,
    # so renaming it could break callers that use this name -- left as-is.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}

# Name under which this driver is exposed to salt-cloud.
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module when a Linode provider is configured.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured Linode provider instance, requiring both
    an API key and a default root password.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by image label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')
    return {item['LABEL']: item for item in response['DATA']}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    response = _query('avail', 'datacenters')
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUG FIX: the original error message named avail_locations here;
        # corrected to reference this function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        ret[item['LABEL']] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Changing
        ``check_running`` to False is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used in user-facing messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS appears to be numeric elsewhere in this module
        # (list_nodes_min casts it with int()), so comparing against the
        # string '1' may never match -- confirm against the API response.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the new Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the new Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three identifiers are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    Either provisions a fresh Linode from a distribution image or, when the
    profile contains ``clonefrom``, clones an existing Linode. Fires the
    standard salt-cloud lifecycle events, boots the node, and bootstraps a
    minion onto it.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) from the profile, if given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    # Resolve the datacenter from the profile's location, if given.
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: inherit size/plan/datacenter from the source Linode
        # when the profile does not override them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

        # NOTE(review): _query already raises on ERRORARRAY contents, so this
        # branch looks unreachable for the default URL -- confirm before
        # removing.
        if 'ERRORARRAY' in result:
            for error_data in result['ERRORARRAY']:
                log.error(
                    'Error creating %s on Linode\n\n'
                    'The Linode API returned the following: %s\n',
                    name, error_data['ERRORMESSAGE']
                )
                return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # Creation is not treated as fatal here; a timeout is only logged.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned node already has a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode (check_running=False: the new node is not running yet).
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # The DiskList is a comma-separated list of disk IDs, data disk last.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join(str(disk) for disk in disk_ids)

    config_args = {
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    }

    result = _query('linode', 'config.create', args=config_args)

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; looked up from the profile
        when not given.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    args = {'rootPass': root_password}

    pub_key = get_pub_key(vm_)
    if pub_key:
        args['rootSSHKey'] = pub_key

    args['LinodeID'] = linode_id
    args['DistributionID'] = get_distribution_id(vm_)
    args['Label'] = vm_['name']
    # The root disk gets whatever is left after reserving the swap space.
    args['Size'] = get_disk_size(vm_, swap_size, linode_id)

    result = _query('linode', 'disk.createfromdistribution', args=args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; looked up from the profile when falsy.
    '''
    size = swap_size if swap_size else get_swap_size(vm_)

    args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': size
    }

    return _clean_data(_query('linode', 'disk.create', args=args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    args = {
        'LinodeID': linode_id,
        # The data disk label is derived from the VM name.
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size
    }

    return _clean_data(_query('linode', 'disk.create', args=args))
def create_private_ip(linode_id):
    r'''
    Create a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Announce the pending destroy on the salt event bus before touching
    # the API, so reactors can observe the full lifecycle.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # NOTE(review): skipChecks is forwarded to linode.delete; presumably it
    # bypasses the API's delete safety checks -- confirm in the Linode docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion's cache entry so stale data is not reused.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the Linode's first configuration profile is considered.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a given location name.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    Defaults to the node's total disk space minus the swap size, unless
    the profile specifies 'disk_size'.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever disk space remains
    after the root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    total = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total - swap
    )
    return total - root - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's image does not match any
    available distribution label.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # BUG FIX: the original used pprint.pprint(), which prints to stdout and
    # returns None, so the raised message always showed 'None' instead of
    # the available distributions. pformat() returns the formatted string.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses, keyed by Linode ID — or, when
    ``linode_id`` is given, just that Linode's address lists.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_data = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_data = _query('linode', 'ip.list')['DATA']

    ret = {}
    for entry in ip_data:
        node_id = six.text_type(entry['LINODEID'])
        bucket = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        node_ips = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_ips[bucket].append(entry['IPADDRESS'])

    # When a single Linode was requested, flatten the mapping down to just
    # its public/private address lists.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        match = ret.get(six.text_type(linode_id))
        if match:
            flattened['private_ips'] = match['private_ips']
            flattened['public_ips'] = match['public_ips']
        ret = flattened

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the API data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID for the VM carrying the given label.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode has that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM, preferring the 'password' option
    and falling back to the legacy 'passwd' option.

    vm\_
        The configuration to obtain the password from.
    '''
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__,
        search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used in the Linode API output, and return the matching PLANID.

    label
        The label, or name, of the plan to decode.

    Example: ``Linode 2048`` decodes to ``Linode 2GB``.

    Raises SaltCloudException when the label matches no known plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new "<type> <n>GB" format but unknown.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )

        plan = label.split()
        if len(plan) != 2:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )

        plan_type = plan[0]
        try:
            plan_size = int(plan[1])
        except (TypeError, ValueError):
            # BUG FIX: int() raises ValueError (not TypeError) on a
            # non-numeric string, so the original except clause was dead and
            # labels like 'Linode foo' crashed with an unhandled ValueError.
            plan_size = 0
            log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

        if plan_type == 'Linode' and plan_size == 1024:
            plan_type = 'Nanode'

        # BUG FIX: use floor division so the MB->GB conversion yields an int
        # on Python 3 as well; true division produced '2.0GB' labels that
        # never matched the API's '2GB' labels.
        plan_size = plan_size // 1024
        new_label = "{} {}GB".format(plan_type, plan_size)

        if new_label not in sizes:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
            )

        log.warning(
            'An outdated Linode plan label was detected in your Cloud '
            'Profile (%s). Please update the profile to use the new '
            'label format (%s) for the requested Linode plan size.',
            label, new_label
        )

        label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID for a given plan label.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUG FIX: the original error message referenced show_instance;
        # corrected to name this function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True when the profile requests a private IP address.
    '''
    assign = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return assign
def get_data_disk(vm_):
    '''
    Return True when the profile requests a data disk.

    .. versionadded:: 2016.3.0
    '''
    allocate = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return allocate
def get_pub_key(vm_):
    r'''
    Return the configured SSH public key, if any.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not specify 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (the plan's RAM, in MB).

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the numeric part of the size label does
    not match the plan's RAM value.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Strip the 'Linode ' prefix so the remainder can be compared with the
    # plan's RAM value (e.g. 'Linode 1024' -> '1024').
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    # NOTE(review): sizes whose remainder is not purely numeric (e.g.
    # 'Linode 2GB') will make int() raise ValueError here instead of
    # reaching SaltCloudNotFound -- confirm intended.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Return a brief listing of all linodes.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    brief_listing = _list_linodes(full=False)
    return brief_listing
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    full_listing = _list_linodes(full=True)
    return full_listing
def list_nodes_min(call=None):
    '''
    Return only each VM's name and state — the minimum amount of
    information needed to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    ret = {}
    for node in _query('linode', 'list')['DATA']:
        ret[node['LABEL']] = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    Delegates to the shared salt-cloud helper, filtering the full node
    listing down to the fields configured under ``query.selection``.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed: this error message previously referred to the
        # 'show_instance' action instead of 'reboot'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job reports success, or the wait times out.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    # STATUS arrives as an integer code; translate to a readable string.
    state = int(node_data['STATUS'])
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed: this error message previously referred to the
        # 'show_instance action' instead of the show_pricing function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive the other periods from the hourly and monthly API figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 1 == Running; nothing to do in that case.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to finish before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # Status code 2 == Powered Off; nothing to do in that case.
    if node['STATUS'] == 2:
        # Consistency fix: include the 'action' key here, matching the
        # equivalent early return in start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Wait for the shutdown job to finish before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Fetch all Linodes on the account and shape the API data for
    salt-cloud output. When ``full`` is True, the raw API record is
    attached under the ``extra`` key.
    '''
    ret = {}
    all_ips = get_ips()
    for node in _query('linode', 'list')['DATA']:
        node_id = six.text_type(node['LINODEID'])
        entry = {
            'id': node_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        ip_data = all_ips.get(node_id)
        if ip_data is not None:
            entry['private_ips'] = ip_data['private_ips']
            entry['public_ips'] = ip_data['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action / command
        Combined into the Linode ``api_action`` parameter as
        ``<action>.<command>`` (e.g. ``linode.list``).
    args
        Query parameters; the API key and api_action are injected here.
    method
        HTTP method; DELETE responses are not JSON-decoded.
    Returns the decoded JSON response dict, raising on API-level errors.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    # Inject credentials and the API action unless the caller supplied them.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Simple rate limiting: if another call happened within the same
    # second, sleep for the configured interval before issuing this one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    # The Linode v3 API reports failures in an ERRORARRAY list rather
    # than via HTTP status codes; surface them as salt-cloud exceptions.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll Linode until the given job reports success, or give up.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        Maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True, info level otherwise.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)
    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll a Linode until it reaches the requested status code.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for (defaults to 'brand_new').

    timeout
        Maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True, info level otherwise.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    waiting_descr = _get_status_descr_by_id(status)
    poll_interval = 5
    attempts = int(timeout / poll_interval)
    for _ in range(attempts):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True
        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, waiting_descr
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable description for a Linode status code.

    status_id
        linode VM status ID
    '''
    code = int(status_id)
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == code:
            return status_data['descr']
    # No entry matched. The previous fallback looked the numeric code up in
    # LINODE_STATUS, whose keys are status *names*, so it could only ever
    # yield None anyway; make that explicit.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name,
    or None if the name is unknown.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Check whether the provided name fits Linode's labeling parameters:
    3-48 characters, ASCII alphanumerics plus dashes/underscores, and it
    must begin and end with a letter or number.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    label = six.text_type(name)
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return which interface type to connect over: 'public_ips' (the
    default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_clean_data
|
python
|
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
|
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1455-L1469
| null |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    Only load this driver when a provider with the required credentials
    is configured; otherwise report the module as unavailable.
    '''
    if get_configured_provider() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    A provider only counts as configured when both 'apikey' and
    'password' are present.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return the Linode distributions available for use as images, keyed
    by their labels.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    return {
        item['LABEL']: item
        for item in _query('avail', 'distributions')['DATA']
    }
def avail_locations(call=None):
    '''
    Return the available Linode datacenter locations, keyed by their
    location names.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    return {
        item['LOCATION']: item
        for item in _query('avail', 'datacenters')['DATA']
    }
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by their labels.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: this error message previously named 'avail_locations',
        # a copy/paste mistake.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUG FIX: STATUS is an integer code in the API response (the rest
        # of this module compares it against int 1), so the previous
        # comparison with the string '1' never matched and this guard
        # could not fire.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.
    Supports two paths: cloning an existing Linode (``clonefrom`` in the
    profile) or creating a fresh one from a distribution image. Either
    way the new node is labeled, optionally given a private IP, booted,
    and then bootstrapped with Salt.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the requested name violates Linode's label rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        # get_plan_id is defined elsewhere in this module.
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        # Inherit size/plan/location from the clone source when the
        # profile did not specify them.
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # API-level errors are reported in ERRORARRAY rather than raised.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Creation failure here is only logged, not fatal — TODO(review):
    # confirm proceeding after a failed wait is intentional.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

    .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )
    # The API expects the disk IDs as a single comma-separated string.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the root disk for the Linode from the distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The swap size of the VM, in MB; subtracted from the total disk
        when computing the root disk's size.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # Linode's disk.createfromdistribution requires a root password.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create a swap disk on the given Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.
        NOTE(review): despite the ``None`` default, this must be provided —
        ``vm_['name']`` is read unconditionally below.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    kwargs = {}
    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name']+"_data",
                   'Type': 'ext4',
                   'Size': data_size
                   })
    result = _query('linode', 'disk.create', args=kwargs)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Add a private IP address to the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks tells the API to delete even if disks are still attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the local salt-cloud cache if caching is on.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile is considered.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode datacenter ID for the named location. Raises
    KeyError if the location is unknown.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenter = avail_locations()[location]
    return datacenter['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default the root disk to all remaining space after swap.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB: whatever is left of the
    Linode's total disk after the root disk and swap are accounted for.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Return the Linode DistributionID matching the VM profile's ``image``.

    vm\_
        The VM to get the distribution ID for

    Raises SaltCloudNotFound, listing the available distributions, when
    the configured image label does not exist.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    distro_id = ''
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id
    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                # BUG FIX: pprint.pprint() prints to stdout and returns None,
                # so the message used to end with 'available:\nNone'.
                # pformat() returns the formatted string instead.
                pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID. When
        given, the return value is a single ``{'public_ips': [...],
        'private_ips': [...]}`` dict; otherwise it is a mapping from
        Linode ID (as a string) to such dicts.
    '''
    if linode_id:
        ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        ips = _query('linode', 'ip.list')
    ips = ips['DATA']
    ret = {}
    for item in ips:
        node_id = six.text_type(item['LINODEID'])
        # ISPUBLIC is 1 for public addresses, 0 for private ones.
        if item['ISPUBLIC'] == 1:
            key = 'public_ips'
        else:
            key = 'private_ips'
        if ret.get(node_id) is None:
            ret.update({node_id: {'public_ips': [], 'private_ips': []}})
        ret[node_id][key].append(item['IPADDRESS'])
    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        _all_ips = {'public_ips': [], 'private_ips': []}
        matching_id = ret.get(six.text_type(linode_id))
        if matching_id:
            _all_ips['private_ips'] = matching_id['private_ips']
            _all_ips['public_ips'] = matching_id['public_ips']
        ret = _all_ips
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``, at the cost of one extra API call.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # Fall back to the 'passwd' option when 'password' is not set.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=fallback,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label into the format
    found in Linode API output and returns the matching PLANID.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Raises ``SaltCloudException`` when the label cannot be matched to an
    available plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError on non-numeric strings; the old code
                # caught only TypeError, which str input never raises, leaving
                # this fallback unreachable for bad labels.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Floor division: true division would yield e.g. 2.0 on Python 3
            # and build a 'Linode 2.0GB' label that never matches the API.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Message fixed: it previously referenced 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested.

    Reads the ``assign_private_ip`` option from the VM profile; defaults to
    False when unset.
    '''
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested.

    Reads the ``allocate_data_disk`` option from the VM profile; defaults to
    False when unset.

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from (``ssh_pubkey``
        option; not searched globally).
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from (``swap`` option;
        defaults to 128 MB).
    '''
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM in MB).

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and not the actual
        image name. This is a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared helper without the 'extra' payload.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and not the actual
        image name. This is a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared helper, including the raw API node data.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM
    names and their state is returned — the minimum amount of information
    needed to check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.

    The fields are chosen by the ``query.selection`` option in the master
    cloud configuration; filtering is delegated to the shared salt.utils
    cloud helper.
    '''
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Message fixed: it previously referenced 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job completes (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a
    linode_id must be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's
        distribution vendor, such as "Debian" or "RHEL", and not the actual
        image name. This is a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Message fixed: it previously referenced 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from Linode's hourly and monthly prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 1 == Running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Wait for the boot job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == Powered Off; nothing to do. The 'action' key is included
    # for consistency with start()'s already-running return value.
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard the documented default: calling with update_args=None previously
    # raised AttributeError on the .update() below.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data.

    When ``full`` is True, the raw API record for each node is attached
    under the ``extra`` key.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()

    results = {}
    for node in nodes:
        node_id = six.text_type(node['LINODEID'])
        entry = {
            'id': node_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        ip_info = all_ips.get(node_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']

        if full:
            entry['extra'] = node

        results[node['LABEL']] = entry

    return results
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API group (e.g. ``linode``, ``avail``); combined with ``command``
        into the ``api_action`` query parameter as ``action.command``.
    command
        The API command within the group.
    args
        Extra query parameters; any non-dict value is replaced with ``{}``.
    method
        HTTP method; non-POST requests advertise ``Accept: application/json``
        and DELETE responses are not JSON-decoded.
    data
        Request body passed through to the HTTP layer.
    url
        API endpoint base URL.

    Returns the decoded JSON response dict. Raises SaltCloudSystemExit on an
    authentication failure and SaltCloudException on any other API-reported
    error.
    '''
    # LASTCALL records the epoch of the previous request for rate limiting.
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Caller-supplied api_key/api_action take precedence over the defaults.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # If the previous call happened within this same second, sleep for the
    # configured ratelimit_sleep before issuing another request.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    # Surface API-level errors: auth failures abort immediately, everything
    # else is collected and raised as one combined exception.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []

            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']

                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']

        # The most recent job is first; success when it matches and reports
        # HOST_SUCCESS == 1.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update. Defaults to the 'brand_new' code.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})

        if node['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID.

    status_id
        linode VM status ID
    '''
    wanted_code = int(status_id)
    for _, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == wanted_code:
            return status_data['descr']
    # No code matched; fall back to a direct key lookup (None for unknown).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status code by internal status name.

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    # Labels must be 3-48 chars, alphanumeric plus '-'/'_', and must start
    # and end with a letter or number.
    ret = 3 <= len(name) <= 48 and re.match(regex, name) is not None

    if ret is False:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return ret
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips'
    (default) or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__,
        default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_list_linodes
|
python
|
def _list_linodes(full=False):
'''
Helper function to format and parse linode data
'''
nodes = _query('linode', 'list')['DATA']
ips = get_ips()
ret = {}
for node in nodes:
this_node = {}
linode_id = six.text_type(node['LINODEID'])
this_node['id'] = linode_id
this_node['image'] = node['DISTRIBUTIONVENDOR']
this_node['name'] = node['LABEL']
this_node['size'] = node['TOTALRAM']
state = int(node['STATUS'])
this_node['state'] = _get_status_descr_by_id(state)
for key, val in six.iteritems(ips):
if key == linode_id:
this_node['private_ips'] = val['private_ips']
this_node['public_ips'] = val['public_ips']
if full:
this_node['extra'] = node
ret[node['LABEL']] = this_node
return ret
|
Helper function to format and parse linode data
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1472-L1502
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n",
"def get_ips(linode_id=None):\n '''\n Returns public and private IP addresses.\n\n linode_id\n Limits the IP addresses returned to the specified Linode ID.\n '''\n if linode_id:\n ips = _query('linode', 'ip.list', args={'LinodeID': linode_id})\n else:\n ips = _query('linode', 'ip.list')\n\n ips = ips['DATA']\n ret = {}\n\n for item in ips:\n node_id = six.text_type(item['LINODEID'])\n if item['ISPUBLIC'] == 1:\n key = 'public_ips'\n else:\n key = 'private_ips'\n\n if ret.get(node_id) is None:\n ret.update({node_id: {'public_ips': [], 'private_ips': []}})\n ret[node_id][key].append(item['IPADDRESS'])\n\n # If linode_id was specified, only return the ips, and not the\n # dictionary based on the linode ID as a key.\n if linode_id:\n _all_ips = {'public_ips': [], 'private_ips': []}\n matching_id = ret.get(six.text_type(linode_id))\n if matching_id:\n _all_ips['private_ips'] = matching_id['private_ips']\n _all_ips['public_ips'] = matching_id['public_ips']\n\n ret = _all_ips\n\n return ret\n",
"def _get_status_descr_by_id(status_id):\n '''\n Return linode status by ID\n\n status_id\n linode VM status ID\n '''\n for status_name, status_data in six.iteritems(LINODE_STATUS):\n if status_data['code'] == int(status_id):\n return status_data['descr']\n return LINODE_STATUS.get(status_id, None)\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to Linode's numeric STATUS codes and
# human-readable descriptions. Consumed by _get_status_id_by_name() and
# _get_status_descr_by_id().
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE: key name is misspelled ('beeing'), but it is part of the lookup
    # API used by _get_status_id_by_name(), so it is kept as-is.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only load the driver when a Linode provider is configured.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    # Both 'apikey' and 'password' must be present for a valid provider.
    return config.is_provider_configured(
        __opts__,
        provider_name,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by image label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    return {
        item['LABEL']: item
        for item in _query('avail', 'distributions')['DATA']
    }
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    return {
        item['LOCATION']: item
        for item in _query('avail', 'datacenters')['DATA']
    }
def avail_sizes(call=None):
    '''
    Return available Linode sizes, keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Message fixed: it previously referenced 'avail_locations'.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # When invoked with -f, the name comes from kwargs rather than the
    # positional argument.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for human-readable log/error output below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    clone_args = {
        'LinodeID': kwargs.get('linode_id', None),
        'DatacenterID': kwargs.get('datacenter_id', None),
        'PlanID': kwargs.get('plan_id', None),
    }

    # All three arguments are mandatory.
    if any(value is None for value in clone_args.values()):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args=clone_args)
def create(vm_):
    '''
    Create a single Linode VM.

    Provisions (by clone or fresh create), names, networks, configures,
    boots, and bootstraps the instance described by the ``vm_`` profile
    dict, firing salt-cloud events at each stage. Returns the bootstrap
    result dict on success or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    # Resolve the plan (size) from the profile, when given.
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/location from the clone source when not set explicitly.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # Cloned Linodes already have disks and a config profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    if any(item is None for item in required_params):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    # Build the ordered disk list once, appending the optional data disk
    # only when one was supplied (instead of formatting the string twice).
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join('{0}'.format(disk_id) for disk_id in disk_ids)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }

    result = _query('linode', 'config.create', args=config_args)

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB. Defaults to the profile's swap size
        when not supplied.
    '''
    kwargs = {}

    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # The Linode API requires a root password for disk creation.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    # Root disk size is the Linode's total disk minus the swap allocation.
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB. Defaults to the profile's ``swap``
        setting when not supplied.
    '''
    kwargs = {}

    if not swap_size:
        swap_size = get_swap_size(vm_)

    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name'],
                   'Type': 'swap',
                   'Size': swap_size
                   })

    result = _query('linode', 'disk.create', args=kwargs)

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    kwargs = {}

    # The data disk label is derived from the VM name with a '_data' suffix.
    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name']+"_data",
                   'Type': 'ext4',
                   'Size': data_size
                   })

    result = _query('linode', 'disk.create', args=kwargs)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    kwargs = {'LinodeID': linode_id}
    result = _query('linode', 'ip.addprivate', args=kwargs)

    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of the VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Fire the pre-destroy event so reactors can respond before deletion.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks deletes the Linode even if it still has disks attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Drop the minion from the salt-cloud cache when cache updates are on.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        # Resolving by name costs one extra API call.
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile is considered.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    NOTE(review): raises KeyError for an unknown location — callers must
    pass a location present in avail_locations().
    '''
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    # Default to the Linode's total disk minus the swap allocation, unless
    # the profile pins an explicit 'disk_size'.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    # The data disk gets whatever space remains after root and swap.
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM

    vm\_
        The VM to get the distribution ID for
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    distro_id = ''
    for distro in distributions:
        # Match on the human-readable label, e.g. 'CentOS 7'.
        if vm_image_name == distro['LABEL']:
            distro_id = distro['DISTRIBUTIONID']
            return distro_id

    if not distro_id:
        raise SaltCloudNotFound(
            'The DistributionID for the \'{0}\' profile could not be found.\n'
            'The \'{1}\' instance could not be provisioned. The following distributions '
            'are available:\n{2}'.format(
                vm_image_name,
                vm_['name'],
                pprint.pprint(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
            )
        )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    # Group every address under its owning Linode, split public/private.
    grouped = {}
    for record in response['DATA']:
        owner = six.text_type(record['LINODEID'])
        bucket = grouped.setdefault(owner, {'public_ips': [], 'private_ips': []})
        side = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        bucket[side].append(record['IPADDRESS'])

    # If linode_id was specified, collapse the mapping down to a single
    # {'public_ips': [...], 'private_ips': [...]} dict for that Linode.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        selected = grouped.get(six.text_type(linode_id))
        if selected:
            flattened['private_ips'] = selected['private_ips']
            flattened['public_ips'] = selected['public_ips']
        return flattened

    return grouped
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # linode.list with a LinodeID returns exactly one record in DATA.
    result = _query('linode', 'list', args={'LinodeID': linode_id})

    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    nodes = _query('linode', 'list')['DATA']

    linode_id = ''
    for node in nodes:
        # The Linode label is the salt-cloud VM name.
        if name == node['LABEL']:
            linode_id = node['LINODEID']
            return linode_id

    if not linode_id:
        raise SaltCloudNotFound(
            'The specified name, {0}, could not be found.'.format(name)
        )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' takes precedence; 'passwd' is accepted as a fallback key.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new '<type> <n>GB' format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # str.split() yields strings, so a non-numeric size raises
                # ValueError (the original `except TypeError` could never
                # fire). Fall back to 0 and let the lookup below fail loudly.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # Legacy 'Linode 1024' was rebranded as the 'Nanode 1GB' plan.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Convert the legacy MB figure into the new GB label format.
            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # Fixed copy-pasted message that referenced 'show_instance'.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Legacy labels such as 'Linode 1024' are translated to the current
    # API format before the PLANID lookup.
    label = _decode_linode_plan_label(label)

    return label
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # Controlled by the 'assign_private_ip' profile/provider option.
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    # Controlled by the 'allocate_data_disk' profile/provider option.
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not set 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']

    # Size labels look like 'Linode 1024', where the number is RAM in MB;
    # strip the prefix so the number can be compared against the API value.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    ret = {}
    nodes = _query('linode', 'list')['DATA']

    for node in nodes:
        name = node['LABEL']
        # Map numeric API status codes to human-readable descriptions.
        this_node = {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }

        ret[name] = this_node

    return ret
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection is driven by the 'query.selection' option.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed copy-pasted message that referenced 'show_instance'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job completes (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}

    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed copy-pasted message that referenced 'show_instance'.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    # Guard against a missing kwargs dict / 'profile' key so a bad CLI
    # invocation raises the descriptive SaltCloudNotFound below instead
    # of a TypeError/KeyError.
    if kwargs is None:
        kwargs = {}

    profile = __opts__['profiles'].get(kwargs.get('profile'), {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive day/week/year figures from the API's hourly and monthly rates.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 1 means 'Running' (see LINODE_STATUS).
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    # Wait for the boot job to finish before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # Status code 2 means 'Powered Off' (see LINODE_STATUS).
    if node['STATUS'] == 2:
        # Include the 'action' key for consistency with start()'s
        # already-running return value.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    # Wait for the shutdown job to finish before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Guard against the None default: the original called .update() on it
    # directly, raising AttributeError whenever update_args was omitted.
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subsystem (e.g. 'linode', 'avail'); combined with ``command``
        into the ``api_action`` query parameter as '<action>.<command>'.

    command
        The API method within ``action`` (e.g. 'list', 'boot').

    args
        Extra query parameters; the API key and api_action are injected here.

    method
        HTTP method; non-POST requests ask for JSON, DELETE responses are
        not decoded.

    Raises SaltCloudSystemExit on an invalid/expired API key and
    SaltCloudException for any other API-reported error.
    '''
    # LASTCALL tracks the epoch of the previous request for rate limiting.
    global LASTCALL

    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Inject credentials and the API action without clobbering caller values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    # Throttle: if a request already happened this second, sleep for the
    # configured ratelimit_sleep before issuing the next one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))

    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        # Keep secrets out of the logs.
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True when the job reports HOST_SUCCESS, False on timeout.
    '''
    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        # Only the most recent job (index 0) is inspected.
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.

    Returns True once the Linode reports the requested status, False on
    timeout.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    status_desc_waiting = _get_status_descr_by_id(status)

    interval = 5
    iterations = int(timeout / interval)

    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})

        if result['STATUS'] == status:
            return True

        status_desc_result = _get_status_descr_by_id(result['STATUS'])

        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )

    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # NOTE(review): LINODE_STATUS is keyed by status *name*, so for an
    # unknown numeric id this lookup effectively returns None.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status code by internal status name.

    status_name
        internal linode VM status name (a LINODE_STATUS key,
        e.g. 'running'); returns None for an unknown name.
    '''
    return LINODE_STATUS.get(status_name, {}).get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    name_length = len(name)
    # Linode labels: 3-48 chars, alphanumerics/dashes/underscores, and must
    # start and end with an alphanumeric character.
    regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    if name_length < 3 or name_length > 48:
        ret = False
    elif not re.match(regex, name):
        ret = False
    else:
        ret = True

    if ret is False:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return ret
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_query
|
python
|
def _query(action=None,
command=None,
args=None,
method='GET',
header_dict=None,
data=None,
url='https://api.linode.com/'):
'''
Make a web call to the Linode API.
'''
global LASTCALL
vm_ = get_configured_provider()
ratelimit_sleep = config.get_cloud_config_value(
'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
)
apikey = config.get_cloud_config_value(
'apikey', vm_, __opts__, search_global=False
)
if not isinstance(args, dict):
args = {}
if 'api_key' not in args.keys():
args['api_key'] = apikey
if action and 'api_action' not in args.keys():
args['api_action'] = '{0}.{1}'.format(action, command)
if header_dict is None:
header_dict = {}
if method != 'POST':
header_dict['Accept'] = 'application/json'
decode = True
if method == 'DELETE':
decode = False
now = int(time.mktime(datetime.datetime.now().timetuple()))
if LASTCALL >= now:
time.sleep(ratelimit_sleep)
result = __utils__['http.query'](
url,
method,
params=args,
data=data,
header_dict=header_dict,
decode=decode,
decode_type='json',
text=True,
status=True,
hide_fields=['api_key', 'rootPass'],
opts=__opts__,
)
if 'ERRORARRAY' in result['dict']:
if result['dict']['ERRORARRAY']:
error_list = []
for error in result['dict']['ERRORARRAY']:
msg = error['ERRORMESSAGE']
if msg == "Authentication failed":
raise SaltCloudSystemExit(
'Linode API Key is expired or invalid'
)
else:
error_list.append(msg)
raise SaltCloudException(
'Linode API reported error(s): {}'.format(", ".join(error_list))
)
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
log.debug('Linode Response Status Code: %s', result['status'])
return result['dict']
|
Make a web call to the Linode API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1505-L1582
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('apikey', 'password',)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.

    Returns the driver name when a provider is configured, False otherwise.
    '''
    if get_configured_provider() is False:
        return False

    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.

    Requires both 'apikey' and 'password' in the provider configuration.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    response = _query('avail', 'distributions')

    # Key each image record by its human-readable label.
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def avail_locations(call=None):
    '''
    Return the Linode datacenter locations that are available.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Key each datacenter record by its location string.
    response = _query('avail', 'datacenters')
    return {item['LOCATION']: item for item in response['DATA']}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by plan label.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: this error previously named 'avail_locations'.
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    # When invoked as a function, 'name' comes from kwargs rather than
    # the positional argument; config_id is mandatory in that mode.
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
        if config_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires a \'config_id\'.'
            )
    # Prefer the explicit linode_id; resolving the name costs an extra API
    # call. ``linode_item`` is only used for log/error messages.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    # Block until the Linode job system reports the boot job finished.
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    The profile must contain ``name`` and either an ``image`` (fresh deploy)
    or a ``clonefrom`` source Linode (clone deploy). Returns the bootstrap
    result dict on success, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Linode labels have length/charset restrictions; bail out early.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: size/plan/datacenter default to the source Linode's
        # values when not set explicitly in the profile.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # The API reports request-level failures inside ERRORARRAY.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait until the new Linode reaches the 'brand_new' status before
    # attempting any further configuration calls.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Create a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    kwargs = {} if kwargs is None else kwargs
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    if any(arg is None for arg in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )
    # DiskList is a comma-separated list; the data disk is optional.
    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': '{0}'.format(','.join('{0}'.format(d) for d in disk_ids)),
    })
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from a distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the swap disk, in MB; defaults to the profile's value.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    # A root password is mandatory for this API call; an SSH key is optional.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    api_args = {
        'rootPass': root_password,
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
    }
    if pub_key:
        api_args['rootSSHKey'] = pub_key
    result = _query('linode', 'disk.createfromdistribution', args=api_args)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB; defaults to the profile's value.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    })
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create an ext4 data disk for the Linode (the type is hardcoded for now).

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Allocate a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks asks the API to delete even if the Linode still has disks.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache so future queries stay accurate.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    # Resolving by name costs an extra API call.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile of the Linode is returned.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Return the Linode Datacenter ID for a location name.

    location
        The location, or name, of the datacenter to get the ID from.
        Raises ``KeyError`` when the location is unknown.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Return the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to whatever is left over after carving out the swap disk.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # Whatever is not taken by the root and swap disks is left for data.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Return the DistributionID for a VM's configured image label.

    vm\_
        The VM to get the distribution ID for.

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any distribution known to the Linode API.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # No match: report the available labels. Use pprint.pformat, which
    # returns a string; pprint.pprint prints to stdout and returns None,
    # which previously embedded 'None' in the error message.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Return public and private IP addresses.

    linode_id
        When given, return only the IPs of that Linode as a flat dict with
        ``public_ips``/``private_ips`` keys; otherwise return a mapping of
        Linode ID (string) to such dicts.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')
    ret = {}
    for record in response['DATA']:
        node_id = six.text_type(record['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        field = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        bucket[field].append(record['IPADDRESS'])
    # If linode_id was specified, collapse the per-node mapping down to a
    # single public/private dict for that node.
    if linode_id:
        collapsed = {'public_ips': [], 'private_ips': []}
        found = ret.get(six.text_type(linode_id))
        if found:
            collapsed['private_ips'] = found['private_ips']
            collapsed['public_ips'] = found['public_ips']
        return collapsed
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Return the API record for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call compared
        to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    kwargs = kwargs or {}
    linode_id = kwargs.get('linode_id', None)
    if linode_id is None:
        node_name = kwargs.get('name', None)
        if node_name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(node_name)
    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Return the Linode ID of the Linode whose label matches ``name``.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises ``SaltCloudNotFound`` when no Linode carries that label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from. The legacy key
        ``passwd`` is honored as a fallback for ``password``.
    '''
    legacy_value = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=legacy_value, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempt to decode a user-supplied Linode plan label into the format
    used in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Raises ``SaltCloudException`` when the label cannot be matched against
    the plans returned by ``avail_sizes()``.
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            # Already in the new format but unknown - nothing to decode.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings; the
                # original only caught TypeError, which never fires here.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # Floor division keeps the size an int on Python 3; true
            # division would yield e.g. 'Linode 2.0GB', which never matches.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: this error previously referred to 'show_instance'.
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    # Translate legacy labels ('Linode 1024') into current API labels.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True when the profile requests a private IP address.
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True when the profile requests an extra data disk.

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH public key configured for the VM, if any.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Return the amount of swap space to be used, in MB (default 128).

    vm\_
        The VM profile to obtain the swap size from.
    '''
    swap_mb = config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
    return swap_mb
def get_vm_size(vm_):
    r'''
    Return the RAM size (in MB) of the profile's configured plan.

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']
    # Legacy labels look like 'Linode 1024' - strip the prefix so the
    # remainder can be compared against the plan's RAM value.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')
    if ram == int(size_label):
        return ram
    raise SaltCloudNotFound(
        'The specified size, {0}, could not be found.'.format(size_label)
    )
def list_nodes(call=None):
    '''
    Return a brief listing of the available linodes.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, including all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return the minimum amount of information about the provider's VMs:
    just each VM's name, id, and state.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs on the provider, limited to the fields selected
    by the ``query.selection`` option.
    '''
    full_listing = list_nodes_full()
    return __utils__['cloud.list_nodes_select'](
        full_listing, __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            # Fixed: this error previously referred to 'show_instance'.
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the Linode job system reports the reboot completed.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Display details about a particular Linode VM.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        raise SaltCloudException(
            # Fixed: this error previously referred to 'show_instance'.
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive all other periods from the hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job completes.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 == Powered Off; nothing to do. The 'action' key was added
    # for consistency with the other branches and with start().
    if node['STATUS'] == 2:
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job completes.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Update a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Copy defensively: the original mutated the caller's dict and crashed
    # with AttributeError when update_args was left at its None default.
    args = dict(update_args or {})
    args['LinodeID'] = linode_id
    result = _query('linode', 'update', args=args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper that formats the raw linode listing into the salt-cloud node
    layout, keyed by node label. When ``full`` is True the raw API record
    is attached under 'extra'.
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()
    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        ip_info = all_ips.get(linode_id)
        if ip_info is not None:
            entry['private_ips'] = ip_info['private_ips']
            entry['public_ips'] = ip_info['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Poll the Linode job queue until the given job reports success.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True, info level otherwise.
    '''
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        # Only the most recent job (first entry) is inspected.
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Poll Linode until the given Linode reaches the requested status.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status code to wait for; defaults to the 'brand_new' code.

    timeout
        The maximum number of seconds to keep polling.

    quiet
        Log status updates at debug level when True, info level otherwise.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    waiting_for = _get_status_descr_by_id(status)
    poll_interval = 5
    for _ in range(int(timeout / poll_interval)):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        current = _get_status_descr_by_id(result['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.DEBUG if quiet else logging.INFO,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current, waiting_for
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return the human-readable Linode status description for a numeric ID.

    status_id
        linode VM status ID

    Returns None when the ID does not match any known status code.
    '''
    for status_data in six.itervalues(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # Fixed: the old fallback ``LINODE_STATUS.get(status_id, None)`` keyed an
    # integer against the dict's *name* keys, so it always yielded None (and
    # would have returned the wrong type — the whole status dict — had a name
    # ever been passed). Make the None result explicit instead.
    return None
def _get_status_id_by_name(status_name):
    '''
    Return the numeric Linode status code for an internal status name.

    status_name
        internal linode VM status name

    Returns None when the name is unknown.
    '''
    status_data = LINODE_STATUS.get(status_name, {})
    return status_data.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate

    Returns True when the label is acceptable, False otherwise (and logs
    a warning describing the constraints).
    '''
    name = six.text_type(name)
    # Labels must start and end with an alphanumeric character and may only
    # contain alphanumerics, dashes, and underscores in between.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Profile/provider-level setting only; global config is deliberately
    # not consulted (search_global=False).
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_wait_for_job
|
python
|
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
'''
Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info.
'''
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
jobs_result = _query('linode',
'job.list',
args={'LinodeID': linode_id})['DATA']
if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
return True
time.sleep(interval)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Still waiting on Job %s for Linode %s.', job_id, linode_id
)
return False
|
Wait for a Job to return.
linode_id
The ID of the Linode to wait on. Required.
job_id
The ID of the job to wait for.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when True. Otherwise, logs to info.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1585-L1616
|
[
"def _query(action=None,\n command=None,\n args=None,\n method='GET',\n header_dict=None,\n data=None,\n url='https://api.linode.com/'):\n '''\n Make a web call to the Linode API.\n '''\n global LASTCALL\n vm_ = get_configured_provider()\n\n ratelimit_sleep = config.get_cloud_config_value(\n 'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,\n )\n apikey = config.get_cloud_config_value(\n 'apikey', vm_, __opts__, search_global=False\n )\n\n if not isinstance(args, dict):\n args = {}\n\n if 'api_key' not in args.keys():\n args['api_key'] = apikey\n\n if action and 'api_action' not in args.keys():\n args['api_action'] = '{0}.{1}'.format(action, command)\n\n if header_dict is None:\n header_dict = {}\n\n if method != 'POST':\n header_dict['Accept'] = 'application/json'\n\n decode = True\n if method == 'DELETE':\n decode = False\n\n now = int(time.mktime(datetime.datetime.now().timetuple()))\n if LASTCALL >= now:\n time.sleep(ratelimit_sleep)\n\n result = __utils__['http.query'](\n url,\n method,\n params=args,\n data=data,\n header_dict=header_dict,\n decode=decode,\n decode_type='json',\n text=True,\n status=True,\n hide_fields=['api_key', 'rootPass'],\n opts=__opts__,\n )\n\n if 'ERRORARRAY' in result['dict']:\n if result['dict']['ERRORARRAY']:\n error_list = []\n\n for error in result['dict']['ERRORARRAY']:\n msg = error['ERRORMESSAGE']\n\n if msg == \"Authentication failed\":\n raise SaltCloudSystemExit(\n 'Linode API Key is expired or invalid'\n )\n else:\n error_list.append(msg)\n raise SaltCloudException(\n 'Linode API reported error(s): {}'.format(\", \".join(error_list))\n )\n\n LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))\n log.debug('Linode Response Status Code: %s', result['status'])\n\n return result['dict']\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps internal status names to the numeric codes returned by the Linode API
# ('code') and their display descriptions ('descr').
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    # NOTE(review): key is misspelled ('beeing'), but it is part of this
    # module's public constant, so renaming it could break external lookups.
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Only load this module if Linode provider configuration is present.
    '''
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # The provider is only considered configured when both 'apikey' and
    # 'password' are present; is_provider_configured returns False otherwise.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images, keyed by their labels.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distros = _query('avail', 'distributions')['DATA']
    return {item['LABEL']: item for item in distros}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations, keyed by location name.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    return {item['LOCATION']: item for item in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes (plans), keyed by their labels.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: this message previously (and incorrectly) named the
            # avail_locations function.
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    # In function mode the name may only arrive via kwargs.
    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    # linode_item is only used for log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): the status is compared as the string '1' here, while
        # other call sites cast STATUS with int() -- presumably the API
        # returns a string in this payload; confirm before changing.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Duplicate an existing Linode into a new instance.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm\_
        The cloud profile/VM definition to provision. Must contain ``name``;
        ``size``, ``location``, ``image``, and ``clonefrom`` are read when
        present. Returns the bootstrap result dict, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    if _validate_name(name) is False:
        return False

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', name)

    data = {}
    kwargs = {'name': name}

    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})

    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2

    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/plan/location from the clone source when the profile
        # does not specify them.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})

        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }

        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']

        if location is None:
            datacenter_id = clone_source['DATACENTERID']

        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']

        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False

    # API-level errors come back inside the response body.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id

    # NOTE(review): a timeout here is logged but does not abort the create --
    # the flow continues regardless; confirm this is intentional.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )

    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})

    log.debug('Set name for %s - was linode%s.', name, node_id)

    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)

    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)

    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True

    if cloning:
        # A cloned Linode already carries a configuration profile.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']

        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']

    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})

    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])

    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']

    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]

    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)

    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']

    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )

    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(data)

    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    if any(param is None for param in (name, linode_id, root_disk_id, swap_disk_id)):
        raise SaltCloudSystemExit(
            'The create_config functions requires a \'name\', \'linode_id\', '
            '\'root_disk_id\', and \'swap_disk_id\'.'
        )

    disk_ids = [root_disk_id, swap_disk_id]
    if data_disk_id is not None:
        disk_ids.append(data_disk_id)
    disklist = ','.join(six.text_type(disk) for disk in disk_ids)

    result = _query('linode', 'config.create', args={
        'LinodeID': linode_id,
        'KernelID': kernel_id,
        'Label': name,
        'DiskList': disklist
    })

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Create the root disk for a Linode from its distribution image.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # A root password is mandatory for disk.createfromdistribution.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    args = {'rootPass': root_password}
    if pub_key:
        args['rootSSHKey'] = pub_key

    args.update({'LinodeID': linode_id,
                 'DistributionID': get_distribution_id(vm_),
                 'Label': vm_['name'],
                 'Size': get_disk_size(vm_, swap_size, linode_id)})

    return _clean_data(
        _query('linode', 'disk.createfromdistribution', args=args)
    )
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Create the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size
    })

    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'] + '_data',
        'Type': 'ext4',
        'Size': data_size
    })

    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Add a private IP address to the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    return _clean_data(
        _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    )
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    linode_id = get_linode_id_from_name(name)

    # skipChecks instructs the API to delete the Linode even if it still
    # has disks attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})

    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Remove the minion's entry from the salt-cloud cache directory, if enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)

    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    # Only the first configuration profile is reported.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']

    config_id = {'config_id': response[0]['ConfigID']}

    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.

    Raises KeyError when the location is not a known datacenter (callers
    such as create() rely on this).
    '''
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.

    swap
        Swap size in MB, subtracted from the Linode's total disk space
        when computing the default.

    linode_id
        The ID of the Linode whose total disk size is queried.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default is "all remaining space after swap" unless the profile sets
    # an explicit 'disk_size'.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    # The data disk receives whatever space is left after root and swap.
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises SaltCloudNotFound when the profile's image label does not match
    any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    # Fixed: this previously used pprint.pprint(), which prints to stdout and
    # returns None, so the raised message embedded the string 'None' instead
    # of the list of available distributions. pformat() returns the text.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.

    Without ``linode_id`` the result maps Linode IDs (as strings) to their
    IP lists; with it, only that Linode's IP lists are returned.
    '''
    if linode_id:
        ip_data = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_data = _query('linode', 'ip.list')['DATA']

    ret = {}
    for entry in ip_data:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        key = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[key].append(entry['IPADDRESS'])

    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        this_node = ret.get(six.text_type(linode_id))
        if this_node:
            flattened['private_ips'] = this_node['private_ips']
            flattened['public_ips'] = this_node['public_ips']
        ret = flattened

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if linode_id is None:
        if name is None:
            raise SaltCloudSystemExit(
                'The get_linode function requires either a \'name\' or a \'linode_id\'.'
            )
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Look up the Linode ID for the VM carrying the given label.

    name
        The name of the Linode from which to get the Linode ID. Required.

    Raises SaltCloudNotFound when no Linode has the given label.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'password' wins when present; 'passwd' is honored as a fallback key.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the PLANID for the (possibly translated) label, raising
    SaltCloudException for labels that match no known plan.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Label already uses the new format but matches no plan.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # Fixed: int() raises ValueError (not TypeError) on a
                # non-numeric string, so the old handler never fired.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            # Fixed: use floor division so Python 3 yields 'Linode 2GB'
            # rather than 'Linode 2.0GB'. Legacy labels are all exact
            # multiples of 1024 MB, so no precision is lost.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        raise SaltCloudException(
            # Fixed: this message previously (and incorrectly) named the
            # show_instance action.
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translate legacy labels such as 'Linode 2048' into the current
    # 'Linode 2GB' form before resolving the plan ID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # Controlled by the 'assign_private_ip' profile setting; off by default.
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    # Controlled by the 'allocate_data_disk' profile setting; off by default.
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    # May return None when no 'ssh_pubkey' is configured for the profile.
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Defaults to 128 MB when the profile does not set 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.

    Raises SaltCloudNotFound when the numeric portion of the size label
    does not equal the plan's RAM.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    # Raises KeyError when the label is not a known plan.
    ram = avail_sizes()[vm_size]['RAM']

    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')

    # NOTE(review): this check assumes legacy labels like 'Linode 2048' where
    # the numeric suffix equals RAM in MB; confirm behavior for 'GB' labels.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared lister; full=False keeps only the brief fields.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared lister; full=True includes the raw API payload.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    nodes = _query('linode', 'list')['DATA']
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS']))
        }
        for node in nodes
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection comes from the 'query.selection' option in the
    # salt-cloud configuration.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.
    .. versionadded:: 2015.8.0
    name
        The name of the VM to reboot.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # Fixed: the message previously named the show_instance action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until Linode reports the reboot job finished successfully.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.
    .. versionadded:: 2015.8.0
    name
        The name of the VM for which to display details.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a show_instance vm_name
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    linode_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': linode_id})
    addresses = get_ips(linode_id)
    # Build the summary dict directly from the API record.
    return {
        'id': node['LINODEID'],
        'image': node['DISTRIBUTIONVENDOR'],
        'name': node['LABEL'],
        'size': node['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node['STATUS'])),
        'private_ips': addresses['private_ips'],
        'public_ips': addresses['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # Fixed: the message previously named the show_instance action.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive the day/week/year estimates from the hourly and monthly API prices.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.
    name
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running (see LINODE_STATUS); nothing to do in that case.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Wait for the boot job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.
    name
        The name of the VM to stop.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 2 == Powered Off (see LINODE_STATUS); nothing to do in that case.
    if node['STATUS'] == 2:
        # Consistency fix: include the 'action' key here, as start() does in
        # its already-running branch.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Wait for the shutdown job to complete before reporting success.
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.
    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Fixed: the default of None previously crashed with AttributeError on
    # update_args.update() when the argument was omitted.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    full
        When True, attach the complete raw API record under an ``extra`` key.
    '''
    nodes = _query('linode', 'list')['DATA']
    # One ip.list call for all nodes, keyed by stringified Linode ID.
    ips = get_ips()
    ret = {}
    for node in nodes:
        this_node = {}
        linode_id = six.text_type(node['LINODEID'])
        this_node['id'] = linode_id
        this_node['image'] = node['DISTRIBUTIONVENDOR']
        this_node['name'] = node['LABEL']
        this_node['size'] = node['TOTALRAM']
        state = int(node['STATUS'])
        this_node['state'] = _get_status_descr_by_id(state)
        # Attach this node's addresses from the full IP mapping.
        for key, val in six.iteritems(ips):
            if key == linode_id:
                this_node['private_ips'] = val['private_ips']
                this_node['public_ips'] = val['public_ips']
        if full:
            this_node['extra'] = node
        ret[node['LABEL']] = this_node
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.
    action
        The top-level API group (e.g. ``linode`` or ``avail``).
    command
        The API method within that group (e.g. ``list`` or ``boot``); combined
        with ``action`` into the ``api_action`` query parameter.
    args
        Query parameters to send. The API key and ``api_action`` are injected
        here if not already present.
    method
        The HTTP method to use.
    header_dict
        Extra HTTP headers to send.
    data
        Request body to send, if any.
    url
        Base URL of the Linode API endpoint.
    Raises SaltCloudSystemExit on authentication failure and SaltCloudException
    when the API reports any other error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Crude client-side rate limiting: if a call was already issued within the
    # current second, sleep for the configured ratelimit_sleep interval.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        # Keep secrets out of the debug logs.
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Auth errors are fatal; retrying cannot help.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    # Record the epoch of this successful call for the rate limiter above.
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.
    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status to look for to update. Defaults to the 'brand_new' code.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    Returns True once the status matches, False if the timeout elapses first.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    # Poll every 5 seconds until the status matches or the timeout elapses.
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID
    status_id
        linode VM status ID
    Returns the human-readable description, or None for unknown codes.
    '''
    for status_name, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == int(status_id):
            return status_data['descr']
    # Fixed: the old fallback looked the numeric code up as a key of
    # LINODE_STATUS, but its keys are status *names*, so it could only ever
    # yield None for a numeric id. Return None explicitly instead.
    return None
def _get_status_id_by_name(status_name):
    '''
    Look up the numeric Linode status code for an internal status name.
    status_name
        internal linode VM status name
    Returns None when the name is unknown.
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.
    .. versionadded:: 2015.5.6
    name
        The VM name to validate
    Returns True when the label is acceptable, False otherwise (and logs a
    warning describing the constraints).
    '''
    label = six.text_type(name)
    # Labels: 3-48 chars, alphanumeric plus dashes/underscores, and must
    # begin and end with a letter or number.
    pattern = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(label) <= 48 and re.match(pattern, label) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    vm\\_
        The VM configuration to read the ``ssh_interface`` option from.
    '''
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_wait_for_status
|
python
|
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
'''
Wait for a certain status from Linode.
linode_id
The ID of the Linode to wait on. Required.
status
The status to look for to update.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when False. Otherwise, logs to info.
'''
if status is None:
status = _get_status_id_by_name('brand_new')
status_desc_waiting = _get_status_descr_by_id(status)
interval = 5
iterations = int(timeout / interval)
for i in range(0, iterations):
result = get_linode(kwargs={'linode_id': linode_id})
if result['STATUS'] == status:
return True
status_desc_result = _get_status_descr_by_id(result['STATUS'])
time.sleep(interval)
log.log(
logging.INFO if not quiet else logging.DEBUG,
'Status for Linode %s is \'%s\', waiting for \'%s\'.',
linode_id, status_desc_result, status_desc_waiting
)
return False
|
Wait for a certain status from Linode.
linode_id
The ID of the Linode to wait on. Required.
status
The status to look for to update.
timeout
The amount of time to wait for a status to update.
quiet
Log status updates to debug logs when False. Otherwise, logs to info.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1619-L1658
| null |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    Returns the driver name when a Linode provider is configured, or False so
    this module is not loaded.
    '''
    if get_configured_provider() is False:
        return False
    return __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    Both ``apikey`` and ``password`` must be present for a provider entry to
    count as configured.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('apikey', 'password',)
    )
def avail_images(call=None):
    '''
    Return available Linode images.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )
    # Key each distribution record by its human-readable label.
    return {
        item['LABEL']: item
        for item in _query('avail', 'distributions')['DATA']
    }
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Key each datacenter record by its location name.
    return {
        item['LOCATION']: item
        for item in _query('avail', 'datacenters')['DATA']
    }
def avail_sizes(call=None):
    '''
    Return available Linode sizes.
    CLI Example:
    .. code-block:: bash
        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # Fixed: the message previously named the avail_locations function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )
    response = _query('avail', 'LinodePlans')
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item
    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.
    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.
    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.
    config_id
        The ID of the Config to boot. Required.
    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.
    Can be called as an action (which requires a name):
    .. code-block:: bash
        salt-cloud -a boot my-instance config_id=10
    ...or as a function (which requires either a name or linode_id):
    .. code-block:: bash
        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)
    if call == 'function':
        name = kwargs.get('name', None)
        if name is None and linode_id is None:
            raise SaltCloudSystemExit(
                'The boot function requires either a \'name\' or a \'linode_id\'.'
            )
    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )
    # Resolve the ID from the name when needed; linode_item is only used in
    # log/error messages below.
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id
    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # NOTE(review): STATUS is compared to the *string* '1' here, while
        # other call sites in this module compare it as an int — confirm
        # which type the API returns for this field.
        if status == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )
    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']
    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False
    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.
    linode_id
        The ID of the Linode to clone. Required.
    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.
    plan_id
        The ID of the plan (size) of the Linode. Required.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)
    # All three parameters are mandatory for the linode.clone API call.
    if linode_id is None or datacenter_id is None or plan_id is None:
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )
    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.
    vm\\_
        The profile/configuration dict describing the VM to create. Must
        contain a ``name``; ``size``, ``location``, ``image`` and
        ``clonefrom`` are read when present.
    Returns the bootstrap result dict on success, or False on failure.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the name violates Linode's label rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Clone path: derive missing size/plan/location from the source node.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # Wait until the new node reports the 'brand_new' status before touching it.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # Cloned nodes already carry a configuration profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.
    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.
        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )
    # Root disk first, then swap, then the optional data disk.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.
    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The size of the disk, in MB.
    Raises SaltCloudConfigError when no root password is configured.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory for this API call.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    # The root disk takes whatever space remains after the swap allocation.
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the specified Linode.
    vm\_
        The VM profile to create the swap disk for.
    linode_id
        The ID of the Linode to create the swap disk for.
    swap_size
        The size of the disk, in MB.
    '''
    # Fall back to the profile's configured swap size when none was given.
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)
    .. versionadded:: 2016.3.0
    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'] + "_data",
        'Type': 'ext4',
        'Size': data_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.
    linode_id
        The ID of the Linode to create the IP address for.
    Returns the cleaned API response for the new address.
    '''
    kwargs = {'LinodeID': linode_id}
    result = _query('linode', 'ip.addprivate', args=kwargs)
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.
    name
        The name of VM to be be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks=True tells the API to delete even if disks are attached.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the minion from the salt-cloud cache when cache updates are enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile is considered.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.
    location
        The location, or name, of the datacenter to get the ID from.
    Raises KeyError when the location is not a known datacenter.
    '''
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of of the root disk in MB.
    vm\_
        The VM to get the disk size for.
    swap
        The swap disk size in MB, subtracted from the node's total disk space
        to form the default.
    linode_id
        The ID of the Linode whose total disk space provides the default.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # A 'disk_size' setting in the cloud configuration overrides the default.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of of the data disk in MB
    .. versionadded:: 2016.3.0
    vm\\_
        The VM profile (may override ``disk_size`` for the root disk).
    swap
        The swap disk size in MB.
    linode_id
        The ID of the Linode whose total disk space is partitioned.
    '''
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    # The data disk gets whatever space the root and swap disks leave over.
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM
    vm\_
        The VM to get the distribution ID for
    Raises SaltCloudNotFound when the configured image label does not match
    any available distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    # Fixed: pprint.pprint() prints to stdout and returns None, which put the
    # literal text 'None' into this error message; pprint.pformat() returns
    # the formatted listing instead.
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.
    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')
    # Group addresses per node, split into public and private buckets.
    ret = {}
    for record in response['DATA']:
        node_id = six.text_type(record['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        kind = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        bucket[kind].append(record['IPADDRESS'])
    # If linode_id was specified, only return the ips, and not the
    # dictionary based on the linode ID as a key.
    if linode_id:
        flat = {'public_ips': [], 'private_ips': []}
        entry = ret.get(six.text_type(linode_id))
        if entry:
            flat['private_ips'] = entry['private_ips']
            flat['public_ips'] = entry['public_ips']
        ret = flat
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.
    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # linode.list with a LinodeID returns a single-element DATA list.
    result = _query('linode', 'list', args={'LinodeID': linode_id})
    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    # Return on the first label match; raise when nothing matches.
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is honored as a legacy fallback key for 'password'.
    fallback = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__, default=fallback, search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # BUGFIX: int() raises ValueError (not TypeError) for a
                # non-numeric string, so the original handler never fired
                # and bad sizes propagated uncaught.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # BUGFIX: the original message incorrectly referenced show_instance.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Legacy labels such as 'Linode 2048' are translated to the API format.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    wants_private_ip = config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
    return wants_private_ip
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    wants_data_disk = config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
    return wants_data_disk
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    pub_key = config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
    return pub_key
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # Default to 128 MB of swap when the profile does not specify one.
    return config.get_cloud_config_value('swap', vm_, __opts__, default=128)
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Legacy labels look like 'Linode 2048'; strip the prefix so the
    # remainder can be compared against the plan's RAM figure.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram != int(size_label):
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(size_label)
        )
    return ram
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Brief listing: delegate with full disabled.
    return _list_linodes()
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Full listing: delegate with full enabled.
    return _list_linodes(True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    # Keyed by label; only id and human-readable state are kept.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](nodes, selection, call)
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # BUGFIX: the original message incorrectly referenced show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success; treat a timeout as failure.
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node_data['LINODEID'],
        'image': node_data['DISTRIBUTIONVENDOR'],
        'name': node_data['LABEL'],
        'size': node_data['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node_data['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # BUGFIX: the original message incorrectly referenced show_instance.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Hourly/daily/weekly are derived from the published hourly rate;
    # monthly comes straight from the API.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 1:
        # Already running: report success without issuing a boot job.
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}

    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']

    booted = _wait_for_job(node_id, response['JobID'])
    if booted:
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    return {'action': 'start',
            'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    if node['STATUS'] == 2:
        # Consistency fix: include the 'action' key in the already-stopped
        # return, matching the shape start() produces.
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']

    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    if update_args is None:
        # BUGFIX: the original called None.update() when the default was
        # used, raising AttributeError.
        update_args = {}
    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        node_id = six.text_type(node['LINODEID'])
        entry = {
            'id': node_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach addresses only when the IP listing knows about this node.
        addresses = ips.get(node_id)
        if addresses is not None:
            entry['private_ips'] = addresses['private_ips']
            entry['public_ips'] = addresses['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        API action group (e.g. 'linode', 'avail'); combined with ``command``
        into the ``api_action`` query parameter as ``action.command``.
    command
        API command within the action group.
    args
        Query parameters to send; non-dict values are replaced with ``{}``.
    method
        HTTP method; non-POST requests ask for JSON, DELETE skips decoding.
    header_dict
        Extra HTTP headers.
    data
        Request body passed through to the HTTP layer.
    url
        API endpoint base URL.
    '''
    global LASTCALL
    vm_ = get_configured_provider()

    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )

    if not isinstance(args, dict):
        args = {}

    # Every call authenticates via the api_key query parameter.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey

    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)

    if header_dict is None:
        header_dict = {}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    decode = True
    if method == 'DELETE':
        decode = False

    now = int(time.mktime(datetime.datetime.now().timetuple()))

    # Crude rate limiting: if the previous call happened within the same
    # second, pause for the configured ratelimit_sleep before querying again.
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)

    # hide_fields keeps credentials out of Salt's HTTP debug logging.
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )

    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Bad credentials abort immediately instead of being
                    # folded into the aggregate error message.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )

    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])

    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)
    log_level = logging.DEBUG if quiet else logging.INFO

    for _ in range(attempts):
        jobs = _query('linode',
                      'job.list',
                      args={'LinodeID': linode_id})['DATA']
        newest = jobs[0]
        if newest['JOBID'] == job_id and newest['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            log_level,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID

    status_id
        linode VM status ID
    '''
    wanted = int(status_id)
    for _, status_data in six.iteritems(LINODE_STATUS):
        if status_data['code'] == wanted:
            return status_data['descr']
    # Fallback kept from the original: LINODE_STATUS is keyed by name while
    # status_id is numeric, so this effectively yields None for unknown codes.
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    # Labels: 3-48 chars, ASCII alphanumerics plus '-' and '_', and must
    # begin and end with an alphanumeric character.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    interface = config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
    return interface
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_get_status_descr_by_id
|
python
|
def _get_status_descr_by_id(status_id):
'''
Return linode status by ID
status_id
linode VM status ID
'''
for status_name, status_data in six.iteritems(LINODE_STATUS):
if status_data['code'] == int(status_id):
return status_data['descr']
return LINODE_STATUS.get(status_id, None)
|
Return linode status by ID
status_id
linode VM status ID
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1661-L1671
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
LINODE_STATUS = {
'boot_failed': {
'code': -2,
'descr': 'Boot Failed (not in use)',
},
'beeing_created': {
'code': -1,
'descr': 'Being Created',
},
'brand_new': {
'code': 0,
'descr': 'Brand New',
},
'running': {
'code': 1,
'descr': 'Running',
},
'poweroff': {
'code': 2,
'descr': 'Powered Off',
},
'shutdown': {
'code': 3,
'descr': 'Shutting Down (not in use)',
},
'save_to_disk': {
'code': 4,
'descr': 'Saved to Disk (not in use)',
},
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Only load this module when a Linode provider is configured.
    configured = get_configured_provider()
    return False if configured is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # Both the apikey and the default root password must be present.
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    return {entry['LABEL']: entry for entry in distributions}
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    return {entry['LOCATION']: entry for entry in datacenters}
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # BUGFIX: the original message incorrectly referenced avail_locations.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    # Key the returned plans by their human-readable label.
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # BUGFIX: STATUS is handled as an integer elsewhere in this module
        # (e.g. start() compares to 1); the original compared it to the
        # string '1', so this already-running guard could never fire.
        if int(status) == 1:
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    kwargs = kwargs or {}
    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    # All three identifiers are mandatory.
    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    '''
    Create a single Linode VM.

    vm_
        The VM profile dictionary. Must include ``name``; ``size``,
        ``location``, ``image``, and ``clonefrom`` are honored when present.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    # Bail out early if the requested label does not fit Linode's rules.
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit size/datacenter from the clone source when not specified.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    # NOTE(review): a failed wait is only logged, not treated as fatal —
    # creation proceeds regardless; confirm this is intentional.
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # Clones already carry a config profile; reuse it.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.

    name
        The name of the VM to create the config for.

    linode_id
        The ID of the Linode to create the configuration for.

    root_disk_id
        The Root Disk ID to be used for this config.

    swap_disk_id
        The Swap Disk ID to be used for this config.

    data_disk_id
        The Data Disk ID to be used for this config.

        .. versionadded:: 2016.3.0

    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)

    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138

    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            # BUGFIX: corrected 'functions requires' grammar in the message.
            raise SaltCloudSystemExit(
                'The create_config function requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )

    # DiskList is a comma-separated ordering: root, swap, optional data disk.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)

    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }

    result = _query('linode', 'config.create', args=config_args)

    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    root_password = get_password(vm_)
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )

    disk_args = {
        'LinodeID': linode_id,
        'DistributionID': get_distribution_id(vm_),
        'Label': vm_['name'],
        'Size': get_disk_size(vm_, swap_size, linode_id),
        'rootPass': root_password,
    }

    # The SSH public key is optional; only send it when configured.
    pub_key = get_pub_key(vm_)
    if pub_key:
        disk_args['rootSSHKey'] = pub_key

    result = _query('linode', 'disk.createfromdistribution', args=disk_args)

    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Creates the swap disk for the specified Linode.

    vm\_
        The VM profile to create the swap disk for.

    linode_id
        The ID of the Linode to create the swap disk for.

    swap_size
        The size of the disk, in MB.
    '''
    # Fall back to the profile's configured swap size when none is given.
    size = swap_size if swap_size else get_swap_size(vm_)

    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': size,
    })
    return _clean_data(result)
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)

    .. versionadded:: 2016.3.0

    vm\_
        The VM profile to create the data disk for.

    linode_id
        The ID of the Linode to create the data disk for.

    data_size
        The size of the disk, in MB.
    '''
    result = _query('linode', 'disk.create', args={
        'LinodeID': linode_id,
        'Label': vm_['name']+"_data",
        'Type': 'ext4',
        'Size': data_size,
    })
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Creates a private IP for the specified Linode.

    linode_id
        The ID of the Linode to create the IP address for.
    '''
    result = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(result)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.

    name
        The name of VM to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # skipChecks tells the API to delete the Linode even if disks are still
    # attached to it.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # Drop the cached minion entry so stale data doesn't linger in the cachedir.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.

    .. versionadded:: 2015.8.0

    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.

    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile on the Linode is considered.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.

    location
        The location, or name, of the datacenter to get the ID from.
    '''
    datacenters = avail_locations()
    return datacenters[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.

    vm\_
        The VM to get the disk size for.
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    # Default to whatever capacity remains after carving out the swap disk.
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap
    )
    # The data disk gets whatever is left over after root and swap.
    return total_hd - root_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the distribution ID for a VM.

    vm\_
        The VM to get the distribution ID for.

    Raises ``SaltCloudNotFound`` when the profile's image label does not
    match any distribution offered by the Linode API.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)

    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']

    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            # pprint.pprint() prints to stdout and returns None, which made the
            # original message show "None"; pformat() returns the rendered text.
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.

    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        response = _query('linode', 'ip.list', args={'LinodeID': linode_id})
    else:
        response = _query('linode', 'ip.list')

    ret = {}
    for entry in response['DATA']:
        node_id = six.text_type(entry['LINODEID'])
        bucket = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        kind = 'public_ips' if entry['ISPUBLIC'] == 1 else 'private_ips'
        bucket[kind].append(entry['IPADDRESS'])

    if linode_id:
        # A specific Linode was requested: collapse the mapping into a single
        # {public_ips, private_ips} dict instead of keying by node ID.
        flat = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flat['private_ips'] = node_ips['private_ips']
            flat['public_ips'] = node_ips['public_ips']
        return flat

    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``; note this costs one additional API call.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)

    return _query('linode', 'list', args={'LinodeID': linode_id})['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Returns the Linode ID for a VM from the provided name.

    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']

    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.

    vm\_
        The configuration to obtain the password from.
    '''
    # 'passwd' is accepted as a legacy alias for 'password'.
    legacy_value = config.get_cloud_config_value(
        'passwd', vm_, __opts__, search_global=False
    )
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=legacy_value,
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output.

    label
        The label, or name, of the plan to decode.

    Example:
        `Linode 2048` will decode to `Linode 2GB`

    Returns the numeric PLANID for the (possibly translated) label, or raises
    ``SaltCloudException`` when no matching plan exists.
    '''
    sizes = avail_sizes()

    if label not in sizes:
        if 'GB' in label:
            # Already in the new-style format but unknown to the API.
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()

            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )

            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except (TypeError, ValueError):
                # int() raises ValueError for non-numeric strings; the original
                # code only caught TypeError, so malformed labels crashed here
                # instead of reaching this logged fallback.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)

            # The 1GB plan was rebranded from 'Linode' to 'Nanode'.
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'

            plan_size = plan_size/1024
            new_label = "{} {}GB".format(plan_type, plan_size)

            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )

            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )

            label = new_label

    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.

    label
        The label, or name, of the plan to get the ID from.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        # The previous message referenced show_instance; name the right function.
        raise SaltCloudException(
            'The get_plan_id function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )

    # Translates legacy labels (e.g. 'Linode 2048') and returns the PLANID.
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    return config.get_cloud_config_value(
        'assign_private_ip',
        vm_,
        __opts__,
        default=False,
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested

    .. versionadded:: 2016.3.0
    '''
    return config.get_cloud_config_value(
        'allocate_data_disk',
        vm_,
        __opts__,
        default=False,
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.

    vm\_
        The configuration to obtain the public key from.
    '''
    return config.get_cloud_config_value(
        'ssh_pubkey',
        vm_,
        __opts__,
        search_global=False,
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.

    vm\_
        The VM profile to obtain the swap size from.
    '''
    # 128 MB is the driver's historical default swap allocation.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size.

    vm\_
        The VM to get the size for.
    '''
    size_label = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[size_label]['RAM']

    # Legacy labels look like 'Linode 1024'; strip the prefix so the numeric
    # part can be compared against the plan's RAM value.
    if size_label.startswith('Linode'):
        size_label = size_label.replace('Linode ', '')

    if ram == int(size_label):
        return ram

    raise SaltCloudNotFound(
        'The specified size, {0}, could not be found.'.format(size_label)
    )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )

    # Delegate to the shared formatter without the 'extra' payload.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )

    # Delegate to the shared formatter, including the raw API 'extra' payload.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )

    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    selection = __opts__['query.selection']
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), selection, call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.

    .. versionadded:: 2015.8.0

    name
        The name of the VM to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        # The previous message referenced show_instance; name the right action.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']

    # Block until the reboot job reports success (or the poll times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False

    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.

    .. versionadded:: 2015.8.0

    name
        The name of the VM for which to display details.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance vm_name

    .. note::

        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)

    return {
        'id': node['LINODEID'],
        'image': node['DISTRIBUTIONVENDOR'],
        'name': node['LABEL'],
        'size': node['TOTALRAM'],
        'state': _get_status_descr_by_id(int(node['STATUS'])),
        'private_ips': ips['private_ips'],
        'public_ips': ips['public_ips'],
    }
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        # The previous message referenced show_instance; name the right function.
        raise SaltCloudException(
            'The show_pricing function must be called with -f or --function.'
        )

    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )

    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )

    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]

    # Derive the coarser periods from the hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12

    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.

    name
        The name of the VM to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # STATUS 1 == Running; nothing to do in that case.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job reports success (or the poll times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.

    name
        The name of the VM to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )

    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})

    # STATUS 2 == Powered Off; nothing to do in that case.
    if node['STATUS'] == 2:
        return {'success': True,
                'state': 'Stopped',
                'msg': 'Machine already stopped'}

    shutdown = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    if _wait_for_job(node_id, shutdown['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    return {'action': 'stop',
            'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.

    linode_id
        The ID of the Linode to update. Required.

    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # Honor the documented default: calling without update_args previously
    # raised AttributeError on None.update().
    if update_args is None:
        update_args = {}

    update_args.update({'LinodeID': linode_id})

    result = _query('linode', 'update', args=update_args)

    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    '''
    nodes = _query('linode', 'list')['DATA']
    ips = get_ips()

    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }

        # Attach the node's addresses when get_ips() knows about it.
        node_ips = ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']

        if full:
            entry['extra'] = node

        ret[node['LABEL']] = entry

    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.

    action
        The API subsystem (e.g. ``linode``, ``avail``); combined with
        ``command`` into the ``api_action`` query parameter.

    command
        The API method within the subsystem (e.g. ``list``, ``boot``).

    args
        Extra query parameters. The API key and ``api_action`` are injected
        automatically when not already present.

    method
        HTTP method to use; defaults to GET.

    header_dict
        Extra HTTP headers to send.

    data
        Request body, if any.

    url
        Base URL of the Linode API endpoint.

    Returns the decoded JSON response dict. Raises ``SaltCloudSystemExit`` on
    authentication failure and ``SaltCloudException`` for any other API error.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Crude rate limiting: if the previous call happened within the same
    # second, optionally sleep before issuing this request.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],
        opts=__opts__,
    )
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    # Abort outright on auth failures; retrying cannot help.
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.

    linode_id
        The ID of the Linode to wait on. Required.

    job_id
        The ID of the job to wait for.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    poll_interval = 5
    attempts = int(timeout / poll_interval)

    for _ in range(attempts):
        jobs = _query('linode', 'job.list', args={'LinodeID': linode_id})['DATA']
        # Only the most recent job (index 0) is inspected.
        if jobs[0]['JOBID'] == job_id and jobs[0]['HOST_SUCCESS'] == 1:
            return True

        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )

    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.

    linode_id
        The ID of the Linode to wait on. Required.

    status
        The status to look for to update.

    timeout
        The amount of time to wait for a status to update.

    quiet
        Log status updates to debug logs when False. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')

    wanted_descr = _get_status_descr_by_id(status)
    poll_interval = 5

    for _ in range(int(timeout / poll_interval)):
        node = get_linode(kwargs={'linode_id': linode_id})
        if node['STATUS'] == status:
            return True

        current_descr = _get_status_descr_by_id(node['STATUS'])
        time.sleep(poll_interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, current_descr, wanted_descr
        )

    return False
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name

    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _validate_name(name):
    '''
    Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    # Labels must be 3-48 chars, alphanumeric/dash/underscore, and must start
    # and end with an alphanumeric character.
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')

    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None

    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )

    return valid
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    return config.get_cloud_config_value(
        'ssh_interface',
        vm_,
        __opts__,
        default='public_ips',
        search_global=False,
    )
|
saltstack/salt
|
salt/cloud/clouds/linode.py
|
_validate_name
|
python
|
def _validate_name(name):
'''
Checks if the provided name fits Linode's labeling parameters.
.. versionadded:: 2015.5.6
name
The VM name to validate
'''
name = six.text_type(name)
name_length = len(name)
regex = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
if name_length < 3 or name_length > 48:
ret = False
elif not re.match(regex, name):
ret = False
else:
ret = True
if ret is False:
log.warning(
'A Linode label may only contain ASCII letters or numbers, dashes, and '
'underscores, must begin and end with letters or numbers, and be at least '
'three characters in length.'
)
return ret
|
Checks if the provided name fits Linode's labeling parameters.
.. versionadded:: 2015.5.6
name
The VM name to validate
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/linode.py#L1684-L1711
| null |
# -*- coding: utf-8 -*-
'''
Linode Cloud Module using Linode's REST API
===========================================
The Linode cloud module is used to control access to the Linode VPS system.
Use of this module only requires the ``apikey`` parameter. However, the default root password for new instances
also needs to be set. The password needs to be 8 characters and contain lowercase, uppercase, and numbers.
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or ``/etc/salt/cloud.providers.d/linode.conf``:
.. code-block:: yaml
my-linode-provider:
apikey: f4ZsmwtB1c7f85Jdu43RgXVDFlNjuJaeIYV8QMftTqKScEB2vSosFSr...
password: F00barbaz
driver: linode
linode-profile:
provider: my-linode-provider
size: Linode 1024
image: CentOS 7
location: London, England, UK
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import pprint
import re
import time
import datetime
# Import Salt Libs
import salt.config as config
from salt.ext import six
from salt.ext.six.moves import range
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudException,
SaltCloudNotFound,
SaltCloudSystemExit
)
# Get logging started
log = logging.getLogger(__name__)
# The epoch of the last time a query was made; used by _query() for crude
# one-call-per-second rate limiting.
LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
# Human-readable status fields (documentation: https://www.linode.com/api/linode/linode.list)
# Maps an internal status name to the numeric code returned by the API plus a
# display description. NOTE: the key 'beeing_created' (sic) is kept as-is for
# backward compatibility with existing callers.
LINODE_STATUS = {
    'boot_failed': {
        'code': -2,
        'descr': 'Boot Failed (not in use)',
    },
    'beeing_created': {
        'code': -1,
        'descr': 'Being Created',
    },
    'brand_new': {
        'code': 0,
        'descr': 'Brand New',
    },
    'running': {
        'code': 1,
        'descr': 'Running',
    },
    'poweroff': {
        'code': 2,
        'descr': 'Powered Off',
    },
    'shutdown': {
        'code': 3,
        'descr': 'Shutting Down (not in use)',
    },
    'save_to_disk': {
        'code': 4,
        'descr': 'Saved to Disk (not in use)',
    },
}
__virtualname__ = 'linode'
# Only load in this module if the Linode configurations are in place
def __virtual__():
    '''
    Check for Linode configs.
    '''
    # Load only when a usable provider configuration exists.
    return False if get_configured_provider() is False else __virtualname__
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    required_keys = ('apikey', 'password',)
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        required_keys,
    )
def avail_images(call=None):
    '''
    Return available Linode images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-linode-config
        salt-cloud -f avail_images my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    distributions = _query('avail', 'distributions')['DATA']
    # Key each distribution record by its human-readable label.
    return dict((item['LABEL'], item) for item in distributions)
def avail_locations(call=None):
    '''
    Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )

    datacenters = _query('avail', 'datacenters')['DATA']
    # Key each datacenter record by its human-readable location.
    return dict((item['LOCATION'], item) for item in datacenters)
def avail_sizes(call=None):
    '''
    Return available Linode sizes.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-linode-config
        salt-cloud -f avail_sizes my-linode-config
    '''
    if call == 'action':
        # The previous message referenced avail_locations; name the right function.
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.'
        )

    response = _query('avail', 'LinodePlans')

    # Key each plan record by its human-readable label.
    ret = {}
    for item in response['DATA']:
        name = item['LABEL']
        ret[name] = item

    return ret
def boot(name=None, kwargs=None, call=None):
    '''
    Boot a Linode.

    name
        The name of the Linode to boot. Can be used instead of ``linode_id``.

    linode_id
        The ID of the Linode to boot. If provided, will be used as an
        alternative to ``name`` and reduces the number of API calls to
        Linode by one. Will be preferred over ``name``.

    config_id
        The ID of the Config to boot. Required.

    check_running
        Defaults to True. If set to False, overrides the call to check if
        the VM is running before calling the linode.boot API call. Change
        ``check_running`` to True is useful during the boot call in the
        create function, since the new VM will not be running yet.

    Can be called as an action (which requires a name):

    .. code-block:: bash

        salt-cloud -a boot my-instance config_id=10

    ...or as a function (which requires either a name or linode_id):

    .. code-block:: bash

        salt-cloud -f boot my-linode-config name=my-instance config_id=10
        salt-cloud -f boot my-linode-config linode_id=1225876 config_id=10
    '''
    if name is None and call == 'action':
        raise SaltCloudSystemExit(
            'The boot action requires a \'name\'.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    config_id = kwargs.get('config_id', None)
    check_running = kwargs.get('check_running', True)

    if call == 'function':
        name = kwargs.get('name', None)

    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires either a \'name\' or a \'linode_id\'.'
        )

    if config_id is None:
        raise SaltCloudSystemExit(
            'The boot function requires a \'config_id\'.'
        )

    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
        linode_item = name
    else:
        linode_item = linode_id

    # Check if Linode is running first
    if check_running is True:
        status = get_linode(kwargs={'linode_id': linode_id})['STATUS']
        # The API returns STATUS as an integer (1 == Running); comparing the
        # raw value against the string '1' never matched, so normalize first.
        if six.text_type(status) == '1':
            raise SaltCloudSystemExit(
                'Cannot boot Linode {0}. '
                'Linode {0} is already running.'.format(linode_item)
            )

    # Boot the VM and get the JobID from Linode
    response = _query('linode', 'boot',
                      args={'LinodeID': linode_id,
                            'ConfigID': config_id})['DATA']
    boot_job_id = response['JobID']

    if not _wait_for_job(linode_id, boot_job_id):
        log.error('Boot failed for Linode %s.', linode_item)
        return False

    return True
def clone(kwargs=None, call=None):
    '''
    Clone a Linode.

    linode_id
        The ID of the Linode to clone. Required.

    datacenter_id
        The ID of the Datacenter where the Linode will be placed. Required.

    plan_id
        The ID of the plan (size) of the Linode. Required.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f clone my-linode-config linode_id=1234567 datacenter_id=2 plan_id=5
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The clone function must be called with -f or --function.'
        )

    if kwargs is None:
        kwargs = {}

    linode_id = kwargs.get('linode_id', None)
    datacenter_id = kwargs.get('datacenter_id', None)
    plan_id = kwargs.get('plan_id', None)

    if any(param is None for param in (linode_id, datacenter_id, plan_id)):
        raise SaltCloudSystemExit(
            'The clone function requires a \'linode_id\', \'datacenter_id\', '
            'and \'plan_id\' to be provided.'
        )

    return _query('linode', 'clone', args={
        'LinodeID': linode_id,
        'DatacenterID': datacenter_id,
        'PlanID': plan_id,
    })
def create(vm_):
    r'''
    Create a single Linode VM.

    vm\_
        The VM profile dictionary to create the machine from. Must contain at
        least ``name``; ``size``, ``image``, ``location``, and ``clonefrom``
        are honored when present.
    '''
    name = vm_['name']
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'linode',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass
    if _validate_name(name) is False:
        return False
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM %s', name)
    data = {}
    kwargs = {'name': name}
    plan_id = None
    size = vm_.get('size')
    if size:
        kwargs['size'] = size
        plan_id = get_plan_id(kwargs={'label': size})
    datacenter_id = None
    location = vm_.get('location')
    if location:
        try:
            datacenter_id = get_datacenter_id(location)
        except KeyError:
            # Linode's default datacenter is Dallas, but we still have to set one to
            # use the create function from Linode's API. Dallas's datacenter id is 2.
            datacenter_id = 2
    clonefrom_name = vm_.get('clonefrom')
    cloning = True if clonefrom_name else False
    if cloning:
        # Inherit any size/location not given in the profile from the source VM.
        linode_id = get_linode_id_from_name(clonefrom_name)
        clone_source = get_linode(kwargs={'linode_id': linode_id})
        kwargs = {
            'clonefrom': clonefrom_name,
            'image': 'Clone of {0}'.format(clonefrom_name),
        }
        if size is None:
            size = clone_source['TOTALRAM']
            kwargs['size'] = size
            plan_id = clone_source['PLANID']
        if location is None:
            datacenter_id = clone_source['DATACENTERID']
        # Create new Linode from cloned Linode
        try:
            result = clone(kwargs={'linode_id': linode_id,
                                   'datacenter_id': datacenter_id,
                                   'plan_id': plan_id})
        except Exception as err:
            log.error(
                'Error cloning \'%s\' on Linode.\n\n'
                'The following exception was thrown by Linode when trying to '
                'clone the specified machine:\n%s',
                clonefrom_name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    else:
        kwargs['image'] = vm_['image']
        # Create Linode
        try:
            result = _query('linode', 'create', args={
                'PLANID': plan_id,
                'DATACENTERID': datacenter_id
            })
        except Exception as err:
            log.error(
                'Error creating %s on Linode\n\n'
                'The following exception was thrown by Linode when trying to '
                'run the initial deployment:\n%s',
                name, err, exc_info_on_loglevel=logging.DEBUG
            )
            return False
    # API-level errors come back inline rather than as exceptions.
    if 'ERRORARRAY' in result:
        for error_data in result['ERRORARRAY']:
            log.error(
                'Error creating %s on Linode\n\n'
                'The Linode API returned the following: %s\n',
                name, error_data['ERRORMESSAGE']
            )
            return False
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        args=__utils__['cloud.filter_event']('requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    node_id = _clean_data(result)['LinodeID']
    data['id'] = node_id
    if not _wait_for_status(node_id, status=(_get_status_id_by_name('brand_new'))):
        log.error(
            'Error creating %s on LINODE\n\n'
            'while waiting for initial ready status',
            name, exc_info_on_loglevel=logging.DEBUG
        )
    # Update the Linode's Label to reflect the given VM name
    update_linode(node_id, update_args={'Label': name})
    log.debug('Set name for %s - was linode%s.', name, node_id)
    # Add private IP address if requested
    private_ip_assignment = get_private_ip(vm_)
    if private_ip_assignment:
        create_private_ip(node_id)
    # Define which ssh_interface to use
    ssh_interface = _get_ssh_interface(vm_)
    # If ssh_interface is set to use private_ips, but assign_private_ip
    # wasn't set to True, let's help out and create a private ip.
    if ssh_interface == 'private_ips' and private_ip_assignment is False:
        create_private_ip(node_id)
        private_ip_assignment = True
    if cloning:
        # A cloned Linode already has its disks/config; just look up the config.
        config_id = get_config_id(kwargs={'linode_id': node_id})['config_id']
    else:
        # Create disks and get ids
        log.debug('Creating disks for %s', name)
        root_disk_id = create_disk_from_distro(vm_, node_id)['DiskID']
        swap_disk_id = create_swap_disk(vm_, node_id)['DiskID']
        # Create a ConfigID using disk ids
        config_id = create_config(kwargs={'name': name,
                                          'linode_id': node_id,
                                          'root_disk_id': root_disk_id,
                                          'swap_disk_id': swap_disk_id})['ConfigID']
    # Boot the Linode
    boot(kwargs={'linode_id': node_id,
                 'config_id': config_id,
                 'check_running': False})
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    data['image'] = kwargs['image']
    data['name'] = name
    data['size'] = size
    data['state'] = _get_status_descr_by_id(state)
    data['private_ips'] = ips['private_ips']
    data['public_ips'] = ips['public_ips']
    # Pass the correct IP address to the bootstrap ssh_host key
    if ssh_interface == 'private_ips':
        vm_['ssh_host'] = data['private_ips'][0]
    else:
        vm_['ssh_host'] = data['public_ips'][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_['password'] = get_password(vm_)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_['public_ips'] = ips['public_ips']
    vm_['private_ips'] = ips['private_ips']
    # Send event that the instance has booted.
    __utils__['cloud.fire_event'](
        'event',
        'waiting for ssh',
        'salt/cloud/{0}/waiting_for_ssh'.format(name),
        sock_dir=__opts__['sock_dir'],
        args={'ip_address': vm_['ssh_host']},
        transport=__opts__['transport']
    )
    # Bootstrap!
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)
    log.info('Created Cloud VM \'%s\'', name)
    log.debug('\'%s\' VM creation details:\n%s', name, pprint.pformat(data))
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def create_config(kwargs=None, call=None):
    '''
    Creates a Linode Configuration Profile.
    name
        The name of the VM to create the config for.
    linode_id
        The ID of the Linode to create the configuration for.
    root_disk_id
        The Root Disk ID to be used for this config.
    swap_disk_id
        The Swap Disk ID to be used for this config.
    data_disk_id
        The Data Disk ID to be used for this config.
        .. versionadded:: 2016.3.0
    kernel_id
        The ID of the kernel to use for this configuration profile.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The create_config function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    root_disk_id = kwargs.get('root_disk_id', None)
    swap_disk_id = kwargs.get('swap_disk_id', None)
    data_disk_id = kwargs.get('data_disk_id', None)
    kernel_id = kwargs.get('kernel_id', None)
    if kernel_id is None:
        # 138 appears to always be the latest 64-bit kernel for Linux
        kernel_id = 138
    required_params = [name, linode_id, root_disk_id, swap_disk_id]
    for item in required_params:
        if item is None:
            raise SaltCloudSystemExit(
                'The create_config functions requires a \'name\', \'linode_id\', '
                '\'root_disk_id\', and \'swap_disk_id\'.'
            )
    # DiskList is a comma-separated, ordered list of disk IDs; the optional
    # data disk is appended after root and swap.
    disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
    if data_disk_id is not None:
        disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
    config_args = {'LinodeID': linode_id,
                   'KernelID': kernel_id,
                   'Label': name,
                   'DiskList': disklist
                   }
    result = _query('linode', 'config.create', args=config_args)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r'''
    Creates the disk for the Linode from the distribution.
    vm\_
        The VM profile to create the disk for.
    linode_id
        The ID of the Linode to create the distribution disk for. Required.
    swap_size
        The size of the disk, in MB.
    '''
    kwargs = {}
    if swap_size is None:
        swap_size = get_swap_size(vm_)
    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)
    if pub_key:
        kwargs.update({'rootSSHKey': pub_key})
    if root_password:
        kwargs.update({'rootPass': root_password})
    else:
        # A root password is mandatory; without one the disk cannot be built.
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    # Root disk is sized to whatever is left after swap (unless overridden
    # by the profile's 'disk_size' -- see get_disk_size()).
    kwargs.update({'LinodeID': linode_id,
                   'DistributionID': get_distribution_id(vm_),
                   'Label': vm_['name'],
                   'Size': get_disk_size(vm_, swap_size, linode_id)})
    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_swap_disk(vm_, linode_id, swap_size=None):
    r'''
    Provision the swap disk for the specified Linode.
    vm\_
        The VM profile to create the swap disk for.
    linode_id
        The ID of the Linode to create the swap disk for.
    swap_size
        The size of the disk, in MB. Falls back to the profile's swap setting.
    '''
    if not swap_size:
        swap_size = get_swap_size(vm_)
    disk_args = {
        'LinodeID': linode_id,
        'Label': vm_['name'],
        'Type': 'swap',
        'Size': swap_size,
    }
    return _clean_data(_query('linode', 'disk.create', args=disk_args))
def create_data_disk(vm_=None, linode_id=None, data_size=None):
    r'''
    Create a data disk for the linode (type is hardcoded to ext4 at the moment)
    .. versionadded:: 2016.3.0
    vm\_
        The VM profile to create the data disk for.
    linode_id
        The ID of the Linode to create the data disk for.
    data_size
        The size of the disk, in MB.
    '''
    kwargs = {}
    # Label the disk '<vm name>_data' to distinguish it from root and swap.
    kwargs.update({'LinodeID': linode_id,
                   'Label': vm_['name']+"_data",
                   'Type': 'ext4',
                   'Size': data_size
                   })
    result = _query('linode', 'disk.create', args=kwargs)
    return _clean_data(result)
def create_private_ip(linode_id):
    r'''
    Attach a new private IP address to the given Linode.
    linode_id
        The ID of the Linode to create the IP address for.
    '''
    response = _query('linode', 'ip.addprivate', args={'LinodeID': linode_id})
    return _clean_data(response)
def destroy(name, call=None):
    '''
    Destroys a Linode by name.
    name
        The name of VM to be destroyed.
    CLI Example:
    .. code-block:: bash
        salt-cloud -d vm_name
    '''
    if call == 'function':
        raise SaltCloudException(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    # Notify listeners (reactors, etc.) before the destroy actually runs.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    linode_id = get_linode_id_from_name(name)
    # NOTE(review): skipChecks is passed so the API does not reject the
    # delete -- confirm exact semantics against the Linode API v3 docs.
    response = _query('linode', 'delete', args={'LinodeID': linode_id, 'skipChecks': True})
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if __opts__.get('update_cachedir', False) is True:
        # Drop the minion's cachedir entry so stale data is not reported.
        __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
    return response
def get_config_id(kwargs=None, call=None):
    '''
    Returns a config_id for a given linode.
    .. versionadded:: 2015.8.0
    name
        The name of the Linode for which to get the config_id. Can be used instead
        of ``linode_id``.
    linode_id
        The ID of the Linode for which to get the config_id. Can be used instead
        of ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_config_id my-linode-config name=my-linode
        salt-cloud -f get_config_id my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\' '
            'to be provided.'
        )
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    # Only the first configuration profile is used; Linodes created by this
    # driver have a single config.
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
    return config_id
def get_datacenter_id(location):
    '''
    Returns the Linode Datacenter ID.
    location
        The location, or name, of the datacenter to get the ID from.
    '''
    # Raises KeyError if the location is not a known datacenter name.
    return avail_locations()[location]['DATACENTERID']
def get_disk_size(vm_, swap, linode_id):
    r'''
    Returns the size of the root disk in MB.
    vm\_
        The VM to get the disk size for.
    swap
        The amount of disk space (in MB) reserved for swap.
    linode_id
        The ID of the Linode whose total disk size caps the default.
    '''
    # Default to "everything that isn't swap" on the target Linode unless
    # the profile explicitly sets 'disk_size'.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    return config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
def get_data_disk_size(vm_, swap, linode_id):
    '''
    Return the size of the data disk in MB
    .. versionadded:: 2016.3.0
    '''
    # The data disk gets whatever space remains after the (possibly
    # profile-overridden) root disk and swap are accounted for.
    disk_size = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_disk_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=disk_size - swap
    )
    return disk_size - root_disk_size - swap
def get_distribution_id(vm_):
    r'''
    Returns the Linode DistributionID matching the profile's ``image``.
    vm\_
        The VM profile to get the distribution ID for.
    Raises ``SaltCloudNotFound`` when the configured image label does not
    match any available Linode distribution.
    '''
    distributions = _query('avail', 'distributions')['DATA']
    vm_image_name = config.get_cloud_config_value('image', vm_, __opts__)
    for distro in distributions:
        if vm_image_name == distro['LABEL']:
            return distro['DISTRIBUTIONID']
    raise SaltCloudNotFound(
        'The DistributionID for the \'{0}\' profile could not be found.\n'
        'The \'{1}\' instance could not be provisioned. The following distributions '
        'are available:\n{2}'.format(
            vm_image_name,
            vm_['name'],
            # BUGFIX: pprint.pprint() prints to stdout and returns None,
            # which rendered the literal string 'None' in this error
            # message; pprint.pformat() returns the formatted text.
            pprint.pformat(sorted([distro['LABEL'].encode(__salt_system_encoding__) for distro in distributions]))
        )
    )
def get_ips(linode_id=None):
    '''
    Returns public and private IP addresses.
    linode_id
        Limits the IP addresses returned to the specified Linode ID.
    '''
    if linode_id:
        ip_records = _query('linode', 'ip.list', args={'LinodeID': linode_id})['DATA']
    else:
        ip_records = _query('linode', 'ip.list')['DATA']
    ret = {}
    for record in ip_records:
        node_id = six.text_type(record['LINODEID'])
        bucket = 'public_ips' if record['ISPUBLIC'] == 1 else 'private_ips'
        node_ips = ret.setdefault(node_id, {'public_ips': [], 'private_ips': []})
        node_ips[bucket].append(record['IPADDRESS'])
    # When a single Linode was requested, flatten the result to just its IP
    # dictionary instead of keying the output by Linode ID.
    if linode_id:
        flattened = {'public_ips': [], 'private_ips': []}
        node_ips = ret.get(six.text_type(linode_id))
        if node_ips:
            flattened['private_ips'] = node_ips['private_ips']
            flattened['public_ips'] = node_ips['public_ips']
        ret = flattened
    return ret
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.
    name
        The name of the Linode for which to get data. Can be used instead of
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.
    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The get_linode function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_linode function requires either a \'name\' or a \'linode_id\'.'
        )
    # Resolve the name to an ID if needed (costs one extra API call).
    if linode_id is None:
        linode_id = get_linode_id_from_name(name)
    result = _query('linode', 'list', args={'LinodeID': linode_id})
    return result['DATA'][0]
def get_linode_id_from_name(name):
    '''
    Look up the Linode ID corresponding to the given label.
    name
        The name of the Linode from which to get the Linode ID. Required.
    '''
    for node in _query('linode', 'list')['DATA']:
        if node['LABEL'] == name:
            return node['LINODEID']
    raise SaltCloudNotFound(
        'The specified name, {0}, could not be found.'.format(name)
    )
def get_password(vm_):
    r'''
    Return the password to use for a VM.
    vm\_
        The configuration to obtain the password from.
    '''
    # Prefer the 'password' key, falling back to the legacy 'passwd' key.
    return config.get_cloud_config_value(
        'password', vm_, __opts__,
        default=config.get_cloud_config_value(
            'passwd', vm_, __opts__,
            search_global=False
        ),
        search_global=False
    )
def _decode_linode_plan_label(label):
    '''
    Attempts to decode a user-supplied Linode plan label
    into the format in Linode API output
    label
        The label, or name, of the plan to decode.
    Example:
        `Linode 2048` will decode to `Linode 2GB`
    '''
    sizes = avail_sizes()
    if label not in sizes:
        if 'GB' in label:
            raise SaltCloudException(
                'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
            )
        else:
            plan = label.split()
            if len(plan) != 2:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(label)
                )
            plan_type = plan[0]
            try:
                plan_size = int(plan[1])
            except ValueError:
                # BUGFIX: int() on a non-numeric string raises ValueError,
                # not TypeError, so the original handler never fired and
                # malformed labels crashed instead of logging.
                plan_size = 0
                log.debug('Failed to decode Linode plan label in Cloud Profile: %s', label)
            if plan_type == 'Linode' and plan_size == 1024:
                plan_type = 'Nanode'
            # BUGFIX: use floor division; true division on Python 3 yields a
            # float ('Linode 2.0GB') which never matches an API label.
            plan_size = plan_size // 1024
            new_label = "{} {}GB".format(plan_type, plan_size)
            if new_label not in sizes:
                raise SaltCloudException(
                    'Invalid Linode plan ({}) specified - call avail_sizes() for all available options'.format(new_label)
                )
            log.warning(
                'An outdated Linode plan label was detected in your Cloud '
                'Profile (%s). Please update the profile to use the new '
                'label format (%s) for the requested Linode plan size.',
                label, new_label
            )
            label = new_label
    return sizes[label]['PLANID']
def get_plan_id(kwargs=None, call=None):
    '''
    Returns the Linode Plan ID.
    label
        The label, or name, of the plan to get the ID from.
    CLI Example:
    .. code-block:: bash
        salt-cloud -f get_plan_id linode label="Linode 1024"
    '''
    if call == 'action':
        raise SaltCloudException(
            # BUGFIX: this guard previously referred to 'show_instance'.
            'The get_plan_id function must be called with -f or --function.'
        )
    if kwargs is None:
        kwargs = {}
    label = kwargs.get('label', None)
    if label is None:
        raise SaltCloudException(
            'The get_plan_id function requires a \'label\'.'
        )
    return _decode_linode_plan_label(label)
def get_private_ip(vm_):
    '''
    Return True if a private ip address is requested
    '''
    # Controlled by the profile's 'assign_private_ip' option (off by default).
    return config.get_cloud_config_value(
        'assign_private_ip', vm_, __opts__, default=False
    )
def get_data_disk(vm_):
    '''
    Return True if a data disk is requested
    .. versionadded:: 2016.3.0
    '''
    # Controlled by the profile's 'allocate_data_disk' option (off by default).
    return config.get_cloud_config_value(
        'allocate_data_disk', vm_, __opts__, default=False
    )
def get_pub_key(vm_):
    r'''
    Return the SSH pubkey.
    vm\_
        The configuration to obtain the public key from.
    '''
    # Read only from the profile/provider config ('ssh_pubkey'), never
    # from global config.
    return config.get_cloud_config_value(
        'ssh_pubkey', vm_, __opts__, search_global=False
    )
def get_swap_size(vm_):
    r'''
    Returns the amount of swap space to be used in MB.
    vm\_
        The VM profile to obtain the swap size from.
    '''
    # 128 MB is the driver's default when the profile sets no 'swap'.
    return config.get_cloud_config_value(
        'swap', vm_, __opts__, default=128
    )
def get_vm_size(vm_):
    r'''
    Returns the VM's size (RAM, in MB).
    vm\_
        The VM to get the size for.
    '''
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    ram = avail_sizes()[vm_size]['RAM']
    # Legacy profiles used labels like 'Linode 2048' (RAM in MB); strip the
    # vendor prefix so the numeric comparison below can succeed.
    if vm_size.startswith('Linode'):
        vm_size = vm_size.replace('Linode ', '')
    # NOTE(review): int(vm_size) raises ValueError for labels such as
    # 'Linode 2GB' instead of reaching SaltCloudNotFound -- TODO confirm
    # which label formats are expected to reach this function.
    if ram == int(vm_size):
        return ram
    else:
        raise SaltCloudNotFound(
            'The specified size, {0}, could not be found.'.format(vm_size)
        )
def list_nodes(call=None):
    '''
    Returns a list of linodes, keeping only a brief listing.
    CLI Example:
    .. code-block:: bash
        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes function must be called with -f or --function.'
        )
    # Delegate to the shared formatter; full=False omits the 'extra' payload.
    return _list_linodes(full=False)
def list_nodes_full(call=None):
    '''
    List linodes, with all available information.
    CLI Example:
    .. code-block:: bash
        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-linode-config
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The list_nodes_full function must be called with -f or --function.'
        )
    # Delegate to the shared formatter; full=True adds the raw API record.
    return _list_linodes(full=True)
def list_nodes_min(call=None):
    '''
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f list_nodes_min my-linode-config
        salt-cloud --function list_nodes_min my-linode-config
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_min function must be called with -f or --function.'
        )
    # One entry per Linode label: just the stringified ID and a readable state.
    return {
        node['LABEL']: {
            'id': six.text_type(node['LINODEID']),
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        for node in _query('linode', 'list')['DATA']
    }
def list_nodes_select(call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields.
    '''
    # Field selection is driven by the 'query.selection' configuration option.
    return __utils__['cloud.list_nodes_select'](
        list_nodes_full(), __opts__['query.selection'], call,
    )
def reboot(name, call=None):
    '''
    Reboot a linode.
    .. versionadded:: 2015.8.0
    name
        The name of the VM to reboot.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a reboot vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            # BUGFIX: this guard previously referred to 'show_instance'.
            'The reboot action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    response = _query('linode', 'reboot', args={'LinodeID': node_id})
    data = _clean_data(response)
    reboot_jid = data['JobID']
    # Block until the reboot job reports success (or times out).
    if not _wait_for_job(node_id, reboot_jid):
        log.error('Reboot failed for %s.', name)
        return False
    return data
def show_instance(name, call=None):
    '''
    Displays details about a particular Linode VM. Either a name or a linode_id must
    be provided.
    .. versionadded:: 2015.8.0
    name
        The name of the VM for which to display details.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a show_instance vm_name
    .. note::
        The ``image`` label only displays information about the VM's distribution vendor,
        such as "Debian" or "RHEL" and does not display the actual image name. This is
        due to a limitation of the Linode API.
    '''
    if call != 'action':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node_data = get_linode(kwargs={'linode_id': node_id})
    ips = get_ips(node_id)
    state = int(node_data['STATUS'])
    # Normalize the raw API record into salt-cloud's standard node shape.
    ret = {'id': node_data['LINODEID'],
           'image': node_data['DISTRIBUTIONVENDOR'],
           'name': node_data['LABEL'],
           'size': node_data['TOTALRAM'],
           'state': _get_status_descr_by_id(state),
           'private_ips': ips['private_ips'],
           'public_ips': ips['public_ips']}
    return ret
def show_pricing(kwargs=None, call=None):
    '''
    Show pricing for a particular profile. This is only an estimate, based on
    unofficial pricing sources.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt-cloud -f show_pricing my-linode-config profile=my-linode-profile
    '''
    if call != 'function':
        raise SaltCloudException(
            # BUGFIX: this guard previously referred to 'show_instance'.
            'The show_pricing function must be called with -f or --function.'
        )
    profile = __opts__['profiles'].get(kwargs['profile'], {})
    if not profile:
        raise SaltCloudNotFound(
            'The requested profile was not found.'
        )
    # Make sure the profile belongs to Linode
    provider = profile.get('provider', '0:0')
    comps = provider.split(':')
    if len(comps) < 2 or comps[1] != 'linode':
        raise SaltCloudException(
            'The requested profile does not belong to Linode.'
        )
    plan_id = get_plan_id(kwargs={'label': profile['size']})
    response = _query('avail', 'linodeplans', args={'PlanID': plan_id})['DATA'][0]
    # Derive the longer-period estimates from the hourly/monthly figures.
    ret = {}
    ret['per_hour'] = response['HOURLY']
    ret['per_day'] = ret['per_hour'] * 24
    ret['per_week'] = ret['per_day'] * 7
    ret['per_month'] = response['PRICE']
    ret['per_year'] = ret['per_month'] * 12
    return {profile['profile']: ret}
def start(name, call=None):
    '''
    Start a VM in Linode.
    name
        The name of the VM to start.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a start vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # A STATUS of 1 is treated as already running; nothing to do.
    if node['STATUS'] == 1:
        return {'success': True,
                'action': 'start',
                'state': 'Running',
                'msg': 'Machine already running'}
    response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
    # Block until the boot job reports success (or times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Running',
                'action': 'start',
                'success': True}
    else:
        return {'action': 'start',
                'success': False}
def stop(name, call=None):
    '''
    Stop a VM in Linode.
    name
        The name of the VM to stop.
    CLI Example:
    .. code-block:: bash
        salt-cloud -a stop vm_name
    '''
    if call != 'action':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    node_id = get_linode_id_from_name(name)
    node = get_linode(kwargs={'linode_id': node_id})
    # A STATUS of 2 is treated as already stopped; nothing to do.
    if node['STATUS'] == 2:
        # Include 'action' for parity with the equivalent branch in start().
        return {'success': True,
                'action': 'stop',
                'state': 'Stopped',
                'msg': 'Machine already stopped'}
    response = _query('linode', 'shutdown', args={'LinodeID': node_id})['DATA']
    # Block until the shutdown job reports success (or times out).
    if _wait_for_job(node_id, response['JobID']):
        return {'state': 'Stopped',
                'action': 'stop',
                'success': True}
    else:
        return {'action': 'stop',
                'success': False}
def update_linode(linode_id, update_args=None):
    '''
    Updates a Linode's properties.
    linode_id
        The ID of the Linode to update. Required.
    update_args
        The args to update the Linode with. Must be in dictionary form.
    '''
    # BUGFIX: calling .update() on the None default raised AttributeError
    # when no update_args were supplied.
    if update_args is None:
        update_args = {}
    update_args.update({'LinodeID': linode_id})
    result = _query('linode', 'update', args=update_args)
    return _clean_data(result)
def _clean_data(api_response):
'''
Returns the DATA response from a Linode API query as a single pre-formatted dictionary
api_response
The query to be cleaned.
'''
data = {}
data.update(api_response['DATA'])
if not data:
response_data = api_response['DATA']
data.update(response_data)
return data
def _list_linodes(full=False):
    '''
    Helper function to format and parse linode data
    '''
    nodes = _query('linode', 'list')['DATA']
    all_ips = get_ips()
    ret = {}
    for node in nodes:
        linode_id = six.text_type(node['LINODEID'])
        entry = {
            'id': linode_id,
            'image': node['DISTRIBUTIONVENDOR'],
            'name': node['LABEL'],
            'size': node['TOTALRAM'],
            'state': _get_status_descr_by_id(int(node['STATUS'])),
        }
        # Attach the IP lists gathered above, when present for this node.
        node_ips = all_ips.get(linode_id)
        if node_ips is not None:
            entry['private_ips'] = node_ips['private_ips']
            entry['public_ips'] = node_ips['public_ips']
        if full:
            entry['extra'] = node
        ret[node['LABEL']] = entry
    return ret
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None,
           url='https://api.linode.com/'):
    '''
    Make a web call to the Linode API.
    action
        The API subsystem to target, e.g. 'linode' or 'avail'.
    command
        The API method within that subsystem, e.g. 'list'.
    args
        Query parameters; the API key and api_action are injected here.
    method
        HTTP method to use. Default: GET.
    header_dict
        Extra HTTP headers to send.
    data
        Request body, if any.
    url
        The Linode API endpoint.
    '''
    global LASTCALL
    vm_ = get_configured_provider()
    ratelimit_sleep = config.get_cloud_config_value(
        'ratelimit_sleep', vm_, __opts__, search_global=False, default=0,
    )
    apikey = config.get_cloud_config_value(
        'apikey', vm_, __opts__, search_global=False
    )
    if not isinstance(args, dict):
        args = {}
    # Inject authentication and the 'subsystem.method' action selector,
    # without clobbering caller-provided values.
    if 'api_key' not in args.keys():
        args['api_key'] = apikey
    if action and 'api_action' not in args.keys():
        args['api_action'] = '{0}.{1}'.format(action, command)
    if header_dict is None:
        header_dict = {}
    if method != 'POST':
        header_dict['Accept'] = 'application/json'
    decode = True
    if method == 'DELETE':
        decode = False
    # Simple client-side rate limiting: if a call was already made during
    # this same second, optionally sleep before issuing the next one.
    now = int(time.mktime(datetime.datetime.now().timetuple()))
    if LASTCALL >= now:
        time.sleep(ratelimit_sleep)
    result = __utils__['http.query'](
        url,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        hide_fields=['api_key', 'rootPass'],  # keep secrets out of logs
        opts=__opts__,
    )
    # Linode reports failures in-band via ERRORARRAY rather than only via
    # the HTTP status code.
    if 'ERRORARRAY' in result['dict']:
        if result['dict']['ERRORARRAY']:
            error_list = []
            for error in result['dict']['ERRORARRAY']:
                msg = error['ERRORMESSAGE']
                if msg == "Authentication failed":
                    raise SaltCloudSystemExit(
                        'Linode API Key is expired or invalid'
                    )
                else:
                    error_list.append(msg)
            raise SaltCloudException(
                'Linode API reported error(s): {}'.format(", ".join(error_list))
            )
    LASTCALL = int(time.mktime(datetime.datetime.now().timetuple()))
    log.debug('Linode Response Status Code: %s', result['status'])
    return result['dict']
def _wait_for_job(linode_id, job_id, timeout=300, quiet=True):
    '''
    Wait for a Job to return.
    linode_id
        The ID of the Linode to wait on. Required.
    job_id
        The ID of the job to wait for.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        jobs_result = _query('linode',
                             'job.list',
                             args={'LinodeID': linode_id})['DATA']
        # NOTE(review): only the first entry of the job list is inspected;
        # this assumes the awaited job is the newest one -- TODO confirm
        # the ordering guarantee of linode.job.list.
        if jobs_result[0]['JOBID'] == job_id and jobs_result[0]['HOST_SUCCESS'] == 1:
            return True
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Still waiting on Job %s for Linode %s.', job_id, linode_id
        )
    return False
def _wait_for_status(linode_id, status=None, timeout=300, quiet=True):
    '''
    Wait for a certain status from Linode.
    linode_id
        The ID of the Linode to wait on. Required.
    status
        The status to look for to update.
    timeout
        The amount of time to wait for a status to update.
    quiet
        Log status updates to debug logs when True. Otherwise, logs to info.
    '''
    if status is None:
        status = _get_status_id_by_name('brand_new')
    status_desc_waiting = _get_status_descr_by_id(status)
    interval = 5
    iterations = int(timeout / interval)
    for i in range(0, iterations):
        result = get_linode(kwargs={'linode_id': linode_id})
        if result['STATUS'] == status:
            return True
        status_desc_result = _get_status_descr_by_id(result['STATUS'])
        time.sleep(interval)
        log.log(
            logging.INFO if not quiet else logging.DEBUG,
            'Status for Linode %s is \'%s\', waiting for \'%s\'.',
            linode_id, status_desc_result, status_desc_waiting
        )
    # Timed out without ever observing the requested status.
    return False
def _get_status_descr_by_id(status_id):
    '''
    Return linode status by ID
    status_id
        linode VM status ID
    '''
    code = int(status_id)
    for entry in six.itervalues(LINODE_STATUS):
        if entry['code'] == code:
            return entry['descr']
    # Fall back to a direct lookup (yields None for unknown codes).
    return LINODE_STATUS.get(status_id, None)
def _get_status_id_by_name(status_name):
    '''
    Return linode status description by internalstatus name
    status_name
        internal linode VM status name
    '''
    entry = LINODE_STATUS.get(status_name, {})
    return entry.get('code', None)
def _get_ssh_interface(vm_):
    '''
    Return the ssh_interface type to connect to. Either 'public_ips' (default)
    or 'private_ips'.
    '''
    # Read only from the profile/provider config, never from global config.
    return config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, default='public_ips',
        search_global=False
    )
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
_get_capirca_platform
|
python
|
def _get_capirca_platform():  # pylint: disable=too-many-return-statements
    '''
    Given the following NAPALM grains, we can determine the Capirca platform name:
    - vendor
    - device model
    - operating system
    Not the most optimal.
    '''
    vendor = __grains__['vendor'].lower()
    os_ = __grains__['os'].lower()
    model = __grains__['model'].lower()
    if vendor == 'juniper':
        if 'srx' in model:
            return 'junipersrx'
    elif vendor == 'cisco':
        cisco_platforms = {'ios': 'cisco', 'iosxr': 'ciscoxr', 'asa': 'ciscoasa'}
        if os_ in cisco_platforms:
            return cisco_platforms[os_]
    if os_ == 'linux':
        return 'iptables'
    if vendor == 'palo alto networks':
        return 'paloaltofw'
    # Several Capirca platforms are named after the device vendor
    # (e.g. eOS => arista, junos => juniper), so fall back to the vendor.
    return vendor
|
Given the following NAPALM grains, we can determine the Capirca platform name:
- vendor
- device model
- operating system
Not the most optimal.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L80-L108
| null |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
    '''
    This module requires both NAPALM and Capirca.
    '''
    # Load only when the capirca import succeeded AND the NAPALM proxy
    # checks pass for this minion.
    if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
        return __virtualname__
    else:
        return (False, 'The netacl (napalm_acl) module cannot be loaded: \
            Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
def load_term_config(filter_name,
term_name,
filter_options=None,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
source_service=None,
destination_service=None,
**term_fields):
'''
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value, either a list of values, but also they can select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
'''
if not filter_options:
filter_options = []
platform = _get_capirca_platform()
term_config = __salt__['capirca.get_term_config'](platform,
filter_name,
term_name,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format,
source_service=source_service,
destination_service=destination_service,
**term_fields)
return __salt__['net.load_config'](text=term_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_filter_config(filter_name,
                       filter_options=None,
                       terms=None,
                       prepend=True,
                       pillar_key='acl',
                       pillarenv=None,
                       saltenv=None,
                       merge_pillar=True,
                       only_lower_merge=False,
                       revision_id=None,
                       revision_no=None,
                       revision_date=True,
                       revision_date_format='%Y/%m/%d',
                       test=False,
                       commit=True,
                       debug=False,
                       **kwargs):  # pylint: disable=unused-argument
    '''
    Generate and load the configuration of a policy filter.

    .. note::

        The order of the terms is very important. The configuration loaded
        on the device respects the order defined in the ``terms`` and/or
        inside the pillar.

        When merging the ``terms`` with the pillar data, consider the
        ``prepend`` argument to make sure the order is correct!

    filter_name
        The name of the policy filter.

    filter_options
        Additional filter options. These options are platform-specific.
        See the complete list of options_.

        .. _options: https://github.com/google/capirca/wiki/Policy-format#header-section

    terms
        List of terms for this policy filter.
        If not specified or empty, will try to load the configuration from the pillar,
        unless ``merge_pillar`` is set as ``False``.

    prepend: ``True``
        When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
        the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
        at the beginning, while existing ones will preserve the position. To add the new terms
        at the end of the list, set this argument to ``False``.

    pillar_key: ``acl``
        The key in the pillar containing the default attributes values. Default: ``acl``.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI variables with the pillar. Default: ``True``.
        The merge logic depends on the ``prepend`` argument and
        the CLI has higher priority than the pillar.

    only_lower_merge: ``False``
        Specify if it should merge only the terms fields. Otherwise it will try
        to merge also filters fields. Default: ``False``.
        This option requires ``merge_pillar``, otherwise it is ignored.

    revision_id
        Add a comment in the filter config having the description for the changes applied.

    revision_no
        The revision count.

    revision_date: ``True``
        Boolean flag: display the date when the filter configuration was generated. Default: ``True``.

    revision_date_format: ``%Y/%m/%d``
        The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return the changes.
        Default: ``False`` and will commit the changes on the device.

    commit: ``True``
        Commit? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key under the output dictionary,
        as ``loaded_config`` containing the raw configuration loaded on the device.

    The output is a dictionary having the same form as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True

    Output Example:

    .. code-block:: jinja

        edge01.bjm01:
            ----------
            already_configured:
                False
            comment:
            diff:
                [edit firewall]
                +    family inet {
                +        /*
                +         ** $Date: 2017/03/22 $
                +         **
                +         */
                +        filter my-filter {
                +            interface-specific;
                +            term my-term {
                +                from {
                +                    source-port [ 1234 1235 ];
                +                }
                +                then {
                +                    reject;
                +                }
                +            }
                +            term my-other-term {
                +                from {
                +                    protocol tcp;
                +                    source-port 5678-5680;
                +                }
                +                then accept;
                +            }
                +        }
                +    }
            loaded_config:
                firewall {
                    family inet {
                        replace:
                        /*
                        ** $Date: 2017/03/22 $
                        **
                        */
                        filter my-filter {
                            interface-specific;
                            term my-term {
                                from {
                                    source-port [ 1234 1235 ];
                                }
                                then {
                                    reject;
                                }
                            }
                            term my-other-term {
                                from {
                                    protocol tcp;
                                    source-port 5678-5680;
                                }
                                then accept;
                            }
                        }
                    }
                }
            result:
                True

    The filter configuration has been loaded from the pillar, having the following structure:

    .. code-block:: yaml

        netacl:
          - my-filter:
              terms:
                - my-term:
                    source_port:
                      - 1234
                      - 1235
                    action: reject
                - my-other-term:
                    source_port:
                      - - 5678
                        - 5680
                    protocol: tcp
                    action: accept
    '''
    # Normalise the "not provided" cases (None) to empty lists, so the
    # capirca module always receives iterables.
    if not filter_options:
        filter_options = []
    if not terms:
        terms = []
    # Translate the device's NAPALM grains (vendor / os / model) into the
    # platform name understood by the Capirca generators.
    platform = _get_capirca_platform()
    filter_config = __salt__['capirca.get_filter_config'](platform,
                                                          filter_name,
                                                          terms=terms,
                                                          prepend=prepend,
                                                          filter_options=filter_options,
                                                          pillar_key=pillar_key,
                                                          pillarenv=pillarenv,
                                                          saltenv=saltenv,
                                                          merge_pillar=merge_pillar,
                                                          only_lower_merge=only_lower_merge,
                                                          revision_id=revision_id,
                                                          revision_no=revision_no,
                                                          revision_date=revision_date,
                                                          revision_date_format=revision_date_format)
    # Push the generated text on the device; the return dict (diff, result,
    # comment, optionally loaded_config) comes straight from net.load_config.
    # ``napalm_device`` is injected by the @proxy_napalm_wrap decorator.
    return __salt__['net.load_config'](text=filter_config,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_policy_config(filters=None,
                       prepend=True,
                       pillar_key='acl',
                       pillarenv=None,
                       saltenv=None,
                       merge_pillar=True,
                       only_lower_merge=False,
                       revision_id=None,
                       revision_no=None,
                       revision_date=True,
                       revision_date_format='%Y/%m/%d',
                       test=False,
                       commit=True,
                       debug=False,
                       **kwargs):  # pylint: disable=unused-argument
    '''
    Generate and load the configuration of the whole policy.

    .. note::

        The order of the filters and their terms is very important.
        The configuration loaded on the device respects the order
        defined in the ``filters`` and/or inside the pillar.

        When merging the ``filters`` with the pillar data, consider the
        ``prepend`` argument to make sure the order is correct!

    filters
        List of filters for this policy.
        If not specified or empty, will try to load the configuration from the pillar,
        unless ``merge_pillar`` is set as ``False``.

    prepend: ``True``
        When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
        the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
        at the beginning, while existing ones will preserve the position. To add the new filters
        at the end of the list, set this argument to ``False``.

    pillar_key: ``acl``
        The key in the pillar containing the default attributes values. Default: ``acl``.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI variables with the pillar. Default: ``True``.
        The merge logic depends on the ``prepend`` argument and
        the CLI has higher priority than the pillar.

    only_lower_merge: ``False``
        Specify if it should merge only the filters and terms fields. Otherwise it will try
        to merge everything at the policy level. Default: ``False``.
        This option requires ``merge_pillar``, otherwise it is ignored.

    revision_id
        Add a comment in the policy config having the description for the changes applied.

    revision_no
        The revision count.

    revision_date: ``True``
        Boolean flag: display the date when the policy configuration was generated. Default: ``True``.

    revision_date_format: ``%Y/%m/%d``
        The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and return the changes.
        Default: ``False`` and will commit the changes on the device.

    commit: ``True``
        Commit? Default: ``True``.

    debug: ``False``
        Debug mode. Will insert a new key under the output dictionary,
        as ``loaded_config`` containing the raw configuration loaded on the device.

    The output is a dictionary having the same form as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.flw01' netacl.load_policy_config debug=True

    Output Example:

    .. code-block:: text

        edge01.flw01:
            ----------
            already_configured:
                False
            comment:
            diff:
                ---
                +++
                @@ -1228,9 +1228,24 @@
                !
                +ipv4 access-list my-filter
                + 10 remark my-term
                + 20 deny tcp host 1.2.3.4 eq 1234 any
                + 30 deny udp host 1.2.3.4 eq 1234 any
                + 40 deny tcp host 1.2.3.4 eq 1235 any
                + 50 deny udp host 1.2.3.4 eq 1235 any
                + 60 remark my-other-term
                + 70 permit tcp any range 5678 5680 any
                +!
                +!
                +ipv4 access-list block-icmp
                + 10 remark first-term
                + 20 deny icmp any any
                !
            loaded_config:
                ! $Date: 2017/03/22 $
                no ipv4 access-list my-filter
                ipv4 access-list my-filter
                 remark my-term
                 deny tcp host 1.2.3.4 eq 1234 any
                 deny udp host 1.2.3.4 eq 1234 any
                 deny tcp host 1.2.3.4 eq 1235 any
                 deny udp host 1.2.3.4 eq 1235 any
                 remark my-other-term
                 permit tcp any range 5678 5680 any
                exit
                no ipv4 access-list block-icmp
                ipv4 access-list block-icmp
                 remark first-term
                 deny icmp any any
                exit
            result:
                True

    The policy configuration has been loaded from the pillar, having the following structure:

    .. code-block:: yaml

        acl:
          - my-filter:
              terms:
                - my-term:
                    source_port:
                      - 1234
                      - 1235
                    protocol:
                      - tcp
                      - udp
                    source_address: 1.2.3.4
                    action: reject
                - my-other-term:
                    source_port:
                      - [5678, 5680]
                    protocol: tcp
                    action: accept
          - block-icmp:
              terms:
                - first-term:
                    protocol:
                      - icmp
                    action: reject
    '''
    # Normalise the "not provided" case (None) to an empty list, so the
    # capirca module always receives an iterable.
    if not filters:
        filters = []
    # Translate the device's NAPALM grains (vendor / os / model) into the
    # platform name understood by the Capirca generators.
    platform = _get_capirca_platform()
    policy_config = __salt__['capirca.get_policy_config'](platform,
                                                          filters=filters,
                                                          prepend=prepend,
                                                          pillar_key=pillar_key,
                                                          pillarenv=pillarenv,
                                                          saltenv=saltenv,
                                                          merge_pillar=merge_pillar,
                                                          only_lower_merge=only_lower_merge,
                                                          revision_id=revision_id,
                                                          revision_no=revision_no,
                                                          revision_date=revision_date,
                                                          revision_date_format=revision_date_format)
    # Push the generated text on the device; the return dict (diff, result,
    # comment, optionally loaded_config) comes straight from net.load_config.
    # ``napalm_device`` is injected by the @proxy_napalm_wrap decorator.
    return __salt__['net.load_config'](text=policy_config,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
def get_filter_pillar(filter_name,
                      pillar_key='acl',
                      pillarenv=None,
                      saltenv=None):
    '''
    Return, from the pillar, the configuration of a single policy filter
    identified by its name.

    Thin convenience wrapper around the ``capirca.get_filter_pillar``
    execution function so it can be invoked directly inside a state SLS.

    filter_name
        The name of the filter.

    pillar_key
        The root key of the whole policy config.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
    '''
    # Delegate the actual pillar lookup to the capirca execution module.
    filter_lookup = __salt__['capirca.get_filter_pillar']
    return filter_lookup(filter_name,
                         pillar_key=pillar_key,
                         pillarenv=pillarenv,
                         saltenv=saltenv)
def get_term_pillar(filter_name,
                    term_name,
                    pillar_key='acl',
                    pillarenv=None,
                    saltenv=None):
    '''
    Return, from the pillar, the configuration of a single term identified
    by its name, within the filter uniquely identified by ``filter_name``.

    Thin convenience wrapper around the ``capirca.get_term_pillar``
    execution function so it can be invoked directly inside a state SLS.

    filter_name
        The name of the filter.

    term_name
        The name of the term.

    pillar_key: ``acl``
        The root key of the whole policy config. Default: ``acl``.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
    '''
    # Delegate the actual pillar lookup to the capirca execution module.
    term_lookup = __salt__['capirca.get_term_pillar']
    return term_lookup(filter_name,
                       term_name,
                       pillar_key=pillar_key,
                       pillarenv=pillarenv,
                       saltenv=saltenv)
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
load_term_config
|
python
|
def load_term_config(filter_name,
term_name,
filter_options=None,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
source_service=None,
destination_service=None,
**term_fields):
'''
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value, either a list of values, but also they can select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
'''
if not filter_options:
filter_options = []
platform = _get_capirca_platform()
term_config = __salt__['capirca.get_term_config'](platform,
filter_name,
term_name,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format,
source_service=source_service,
destination_service=destination_service,
**term_fields)
return __salt__['net.load_config'](text=term_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device)
|
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a destination just using its name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value or a list of values, and they can also select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L116-L451
|
[
"def _get_capirca_platform(): # pylint: disable=too-many-return-statements\n '''\n Given the following NAPALM grains, we can determine the Capirca platform name:\n\n - vendor\n - device model\n - operating system\n\n Not the most optimal.\n '''\n vendor = __grains__['vendor'].lower()\n os_ = __grains__['os'].lower()\n model = __grains__['model'].lower()\n if vendor == 'juniper' and 'srx' in model:\n return 'junipersrx'\n elif vendor == 'cisco' and os_ == 'ios':\n return 'cisco'\n elif vendor == 'cisco' and os_ == 'iosxr':\n return 'ciscoxr'\n elif vendor == 'cisco' and os_ == 'asa':\n return 'ciscoasa'\n elif os_ == 'linux':\n return 'iptables'\n elif vendor == 'palo alto networks':\n return 'paloaltofw'\n # anything else will point to the vendor\n # i.e.: some of the Capirca platforms are named by the device vendor\n # e.g.: eOS => arista, junos => juniper, etc.\n return vendor\n"
] |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
    '''
    Load this module only when both Capirca and NAPALM are importable and
    the (proxy) minion is NAPALM-capable; otherwise return a (False, reason)
    tuple so the loader reports why the module is unavailable.
    '''
    # salt.utils.napalm.virtual verifies the minion/proxy is driven by NAPALM.
    if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
        return __virtualname__
    else:
        return (False, 'The netacl (napalm_acl) module cannot be loaded: \
Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_capirca_platform():  # pylint: disable=too-many-return-statements
    '''
    Derive the Capirca generator platform name from the NAPALM grains:

    - vendor
    - device model
    - operating system

    Not the most optimal.
    '''
    vendor = __grains__['vendor'].lower()
    os_name = __grains__['os'].lower()
    model = __grains__['model'].lower()
    # Vendor-specific special cases come first; the order of these checks
    # mirrors the priority of the mapping (e.g. an SRX wins over plain Juniper).
    if vendor == 'juniper':
        if 'srx' in model:
            return 'junipersrx'
    elif vendor == 'cisco':
        # Cisco splits into several Capirca generators depending on the OS.
        cisco_platforms = {
            'ios': 'cisco',
            'iosxr': 'ciscoxr',
            'asa': 'ciscoasa',
        }
        if os_name in cisco_platforms:
            return cisco_platforms[os_name]
    if os_name == 'linux':
        return 'iptables'
    if vendor == 'palo alto networks':
        return 'paloaltofw'
    # anything else will point to the vendor
    # i.e.: some of the Capirca platforms are named by the device vendor
    # e.g.: eOS => arista, junos => juniper, etc.
    return vendor
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
# pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_filter_config(filter_name,
filter_options=None,
terms=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of a policy filter.
.. note::
The order of the terms is very important. The configuration loaded
on the device respects the order defined in the ``terms`` and/or
inside the pillar.
When merging the ``terms`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filter_name
The name of the policy filter.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
terms
List of terms for this policy filter.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
at the beginning, while existing ones will preserve the position. To add the new terms
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the terms fields. Otherwise it will try
to merge also filters fields. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the filter config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the filter configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter my-filter {
+ interface-specific;
+ term my-term {
+ from {
+ source-port [ 1234 1235 ];
+ }
+ then {
+ reject;
+ }
+ }
+ term my-other-term {
+ from {
+ protocol tcp;
+ source-port 5678-5680;
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter my-filter {
interface-specific;
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
result:
True
The filter configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
action: reject
- my-other-term:
source_port:
- - 5678
- 5680
protocol: tcp
action: accept
'''
if not filter_options:
filter_options = []
if not terms:
terms = []
platform = _get_capirca_platform()
filter_config = __salt__['capirca.get_filter_config'](platform,
filter_name,
terms=terms,
prepend=prepend,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=filter_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_policy_config(filters=None,
                       prepend=True,
                       pillar_key='acl',
                       pillarenv=None,
                       saltenv=None,
                       merge_pillar=True,
                       only_lower_merge=False,
                       revision_id=None,
                       revision_no=None,
                       revision_date=True,
                       revision_date_format='%Y/%m/%d',
                       test=False,
                       commit=True,
                       debug=False,
                       **kwargs):  # pylint: disable=unused-argument
    '''
    Generate the configuration of the complete firewall policy (all filters
    together with their terms) using Capirca, then load it on the network
    device through NAPALM.

    .. note::

        Ordering matters: the filters and terms are rendered on the device
        in the order given in ``filters`` and/or in the pillar. When merging
        with pillar data, use ``prepend`` to control where the CLI-supplied
        filters end up in the final list.

    filters
        The list of filters making up this policy. When empty, the whole
        policy is read from the pillar (unless ``merge_pillar`` is ``False``).

    prepend: ``True``
        When merging with the pillar, place the new filters at the beginning
        of the final list; set to ``False`` to append them at the end.

    pillar_key: ``acl``
        The pillar key under which the policy is described. Default: ``acl``.

    pillarenv
        Compile fresh pillar data on the fly from this pillar environment.

    saltenv
        Accepted only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`; otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI arguments with the pillar data; the CLI arguments take
        precedence.

    only_lower_merge: ``False``
        Merge only the filters and terms fields rather than everything at the
        policy level. Requires ``merge_pillar``, ignored otherwise.

    revision_id
        Free-form comment describing this change, embedded in the generated
        configuration.

    revision_no
        The revision count.

    revision_date: ``True``
        Include the generation date in the rendered configuration.

    revision_date_format: ``%Y/%m/%d``
        ``strftime``-style format used for the revision date.

    test: ``False``
        Dry run: apply the configuration, return the diff, then discard the
        changes instead of committing.

    commit: ``True``
        Commit the changes on the device.

    debug: ``False``
        When ``True``, the returned dictionary additionally carries the raw
        generated configuration under the ``loaded_config`` key.

    The return value has the same structure as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.flw01' netacl.load_policy_config debug=True
    '''
    # Render the policy text for this device's Capirca platform, then hand
    # the result to NAPALM for the usual load/diff/commit cycle.
    rendered = __salt__['capirca.get_policy_config'](_get_capirca_platform(),
                                                     filters=filters or [],
                                                     prepend=prepend,
                                                     pillar_key=pillar_key,
                                                     pillarenv=pillarenv,
                                                     saltenv=saltenv,
                                                     merge_pillar=merge_pillar,
                                                     only_lower_merge=only_lower_merge,
                                                     revision_id=revision_id,
                                                     revision_no=revision_no,
                                                     revision_date=revision_date,
                                                     revision_date_format=revision_date_format)
    return __salt__['net.load_config'](text=rendered,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
def get_filter_pillar(filter_name,
                      pillar_key='acl',
                      pillarenv=None,
                      saltenv=None):
    '''
    Return the pillar configuration of one firewall filter, selected by name.

    Thin convenience wrapper around ``capirca.get_filter_pillar``, mainly
    intended to be called from within state SLS files.

    filter_name
        The name of the filter to look up.

    pillar_key
        The root key of the whole policy config.

    pillarenv
        Compile fresh pillar data on the fly from this pillar environment.

    saltenv
        Accepted only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`; otherwise ignored.
    '''
    pillar_fun = __salt__['capirca.get_filter_pillar']
    return pillar_fun(filter_name,
                      pillar_key=pillar_key,
                      pillarenv=pillarenv,
                      saltenv=saltenv)
def get_term_pillar(filter_name,
                    term_name,
                    pillar_key='acl',
                    pillarenv=None,
                    saltenv=None):
    '''
    Return the pillar configuration of one term, identified by its name and
    the name of the filter it belongs to.

    Thin convenience wrapper around ``capirca.get_term_pillar``, mainly
    intended to be called from within state SLS files.

    filter_name
        The name of the filter the term belongs to.

    term_name
        The name of the term to look up.

    pillar_key: ``acl``
        The root key of the whole policy config. Default: ``acl``.

    pillarenv
        Compile fresh pillar data on the fly from this pillar environment.

    saltenv
        Accepted only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`; otherwise ignored.
    '''
    pillar_fun = __salt__['capirca.get_term_pillar']
    return pillar_fun(filter_name,
                      term_name,
                      pillar_key=pillar_key,
                      pillarenv=pillarenv,
                      saltenv=saltenv)
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
load_filter_config
|
python
|
def load_filter_config(filter_name,
                       filter_options=None,
                       terms=None,
                       prepend=True,
                       pillar_key='acl',
                       pillarenv=None,
                       saltenv=None,
                       merge_pillar=True,
                       only_lower_merge=False,
                       revision_id=None,
                       revision_no=None,
                       revision_date=True,
                       revision_date_format='%Y/%m/%d',
                       test=False,
                       commit=True,
                       debug=False,
                       **kwargs):  # pylint: disable=unused-argument
    '''
    Generate the configuration of a single policy filter using Capirca, then
    load it on the network device through NAPALM.

    .. note::

        Ordering matters: the terms are rendered on the device in the order
        given in ``terms`` and/or in the pillar. When merging with pillar
        data, use ``prepend`` to control where the CLI-supplied terms end up
        in the final list.

    filter_name
        The name of the policy filter.

    filter_options
        Platform-specific filter options; see the Capirca policy format
        documentation for the complete list:
        https://github.com/google/capirca/wiki/Policy-format#header-section

    terms
        The list of terms making up this filter. When empty, the filter is
        read from the pillar (unless ``merge_pillar`` is ``False``).

    prepend: ``True``
        When merging with the pillar, place the new terms at the beginning of
        the final list; set to ``False`` to append them at the end.

    pillar_key: ``acl``
        The pillar key under which the default attribute values live.

    pillarenv
        Compile fresh pillar data on the fly from this pillar environment.

    saltenv
        Accepted only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`; otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI arguments with the pillar data; the CLI arguments take
        precedence.

    only_lower_merge: ``False``
        Merge only the terms fields rather than the filter fields as well.
        Requires ``merge_pillar``, ignored otherwise.

    revision_id
        Free-form comment describing this change, embedded in the generated
        configuration.

    revision_no
        The revision count.

    revision_date: ``True``
        Include the generation date in the rendered configuration.

    revision_date_format: ``%Y/%m/%d``
        ``strftime``-style format used for the revision date.

    test: ``False``
        Dry run: apply the configuration, return the diff, then discard the
        changes instead of committing.

    commit: ``True``
        Commit the changes on the device.

    debug: ``False``
        When ``True``, the returned dictionary additionally carries the raw
        generated configuration under the ``loaded_config`` key.

    The return value has the same structure as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
    '''
    # Render the filter text for this device's Capirca platform, then hand
    # the result to NAPALM for the usual load/diff/commit cycle.
    rendered = __salt__['capirca.get_filter_config'](_get_capirca_platform(),
                                                     filter_name,
                                                     terms=terms or [],
                                                     prepend=prepend,
                                                     filter_options=filter_options or [],
                                                     pillar_key=pillar_key,
                                                     pillarenv=pillarenv,
                                                     saltenv=saltenv,
                                                     merge_pillar=merge_pillar,
                                                     only_lower_merge=only_lower_merge,
                                                     revision_id=revision_id,
                                                     revision_no=revision_no,
                                                     revision_date=revision_date,
                                                     revision_date_format=revision_date_format)
    return __salt__['net.load_config'](text=rendered,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
|
Generate and load the configuration of a policy filter.
.. note::
The order of the terms is very important. The configuration loaded
on the device respects the order defined in the ``terms`` and/or
inside the pillar.
When merging the ``terms`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filter_name
The name of the policy filter.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
terms
List of terms for this policy filter.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
at the beginning, while existing ones will preserve the position. To add the new terms
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the terms fields. Otherwise it will try
to merge also filters fields. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the filter config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the filter configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter my-filter {
+ interface-specific;
+ term my-term {
+ from {
+ source-port [ 1234 1235 ];
+ }
+ then {
+ reject;
+ }
+ }
+ term my-other-term {
+ from {
+ protocol tcp;
+ source-port 5678-5680;
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter my-filter {
interface-specific;
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
result:
True
The filter configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
action: reject
- my-other-term:
source_port:
- - 5678
- 5680
protocol: tcp
action: accept
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L455-L665
|
[
"def _get_capirca_platform(): # pylint: disable=too-many-return-statements\n '''\n Given the following NAPALM grains, we can determine the Capirca platform name:\n\n - vendor\n - device model\n - operating system\n\n Not the most optimal.\n '''\n vendor = __grains__['vendor'].lower()\n os_ = __grains__['os'].lower()\n model = __grains__['model'].lower()\n if vendor == 'juniper' and 'srx' in model:\n return 'junipersrx'\n elif vendor == 'cisco' and os_ == 'ios':\n return 'cisco'\n elif vendor == 'cisco' and os_ == 'iosxr':\n return 'ciscoxr'\n elif vendor == 'cisco' and os_ == 'asa':\n return 'ciscoasa'\n elif os_ == 'linux':\n return 'iptables'\n elif vendor == 'palo alto networks':\n return 'paloaltofw'\n # anything else will point to the vendor\n # i.e.: some of the Capirca platforms are named by the device vendor\n # e.g.: eOS => arista, junos => juniper, etc.\n return vendor\n"
] |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Module-level logger. Keyed on the module name (Salt's convention across
# execution modules) rather than __file__, so that per-module log
# configuration (e.g. log_granular_levels) matches on the dotted module
# path instead of an installation-dependent filesystem path.
log = logging.getLogger(__name__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
    '''
    Load this module only when both Capirca and NAPALM are available and the
    NAPALM checks (proxy/minion compatibility) pass; otherwise return a
    (False, reason) tuple so the loader reports why it was skipped.
    '''
    if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
        return __virtualname__
    # Implicit string concatenation instead of a backslash continuation:
    # the old form embedded the continuation line's leading whitespace in
    # the user-facing error message.
    return (False, 'The netacl (napalm_acl) module cannot be loaded: '
                   'Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_capirca_platform():
    '''
    Derive the Capirca generator (platform) name from the NAPALM grains:

    - vendor
    - device model
    - operating system

    Falls back to the vendor name, as several Capirca platforms are simply
    named after the device vendor (e.g. eOS => arista, junos => juniper).
    '''
    vendor = __grains__['vendor'].lower()
    os_ = __grains__['os'].lower()
    model = __grains__['model'].lower()
    if vendor == 'juniper' and 'srx' in model:
        return 'junipersrx'
    if vendor == 'cisco':
        # The three Cisco operating systems map to distinct generators.
        cisco_generators = {
            'ios': 'cisco',
            'iosxr': 'ciscoxr',
            'asa': 'ciscoasa',
        }
        if os_ in cisco_generators:
            return cisco_generators[os_]
    if os_ == 'linux':
        return 'iptables'
    if vendor == 'palo alto networks':
        return 'paloaltofw'
    return vendor
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
def load_term_config(filter_name,
                     term_name,
                     filter_options=None,
                     pillar_key='acl',
                     pillarenv=None,
                     saltenv=None,
                     merge_pillar=True,
                     revision_id=None,
                     revision_no=None,
                     revision_date=True,
                     revision_date_format='%Y/%m/%d',
                     test=False,
                     commit=True,
                     debug=False,
                     source_service=None,
                     destination_service=None,
                     **term_fields):
    '''
    Generate the configuration of one policy term using Capirca, then load
    it on the network device through NAPALM.

    filter_name
        The name of the policy filter the term belongs to.

    term_name
        The name of the term.

    filter_options
        Platform-specific filter options; see the Capirca policy format
        documentation for the complete list:
        https://github.com/google/capirca/wiki/Policy-format#header-section

    pillar_key: ``acl``
        The pillar key under which the default attribute values live.

    pillarenv
        Compile fresh pillar data on the fly from this pillar environment.

    saltenv
        Accepted only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`; otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI arguments with the pillar data; the CLI arguments take
        precedence.

    revision_id
        Free-form comment describing this change, embedded in the generated
        configuration.

    revision_no
        The revision count.

    revision_date: ``True``
        Include the generation date in the rendered configuration.

    revision_date_format: ``%Y/%m/%d``
        ``strftime``-style format used for the revision date.

    test: ``False``
        Dry run: apply the configuration, return the diff, then discard the
        changes instead of committing.

    commit: ``True``
        Commit the changes on the device.

    debug: ``False``
        When ``True``, the returned dictionary additionally carries the raw
        generated configuration under the ``loaded_config`` key.

    source_service
        Service shortcut: select the source by service name (resolved through
        ``/etc/services``, which can itself be managed with the
        :mod:`file state <salt.states.file>`) instead of specifying an
        explicit source_port/protocol pair.

    destination_service
        Same as ``source_service``, applied to the destination side.

    term_fields
        The term attributes themselves (``source_address``, ``protocol``,
        ``action``, ...). See the list of supported Capirca keywords, plus
        the platform-specific optional ones:
        https://github.com/google/capirca/wiki/Policy-format#keywords
        Most fields accept either a single value or a list of values; the
        port fields additionally accept two-element lists denoting ranges,
        e.g. ``source_port='[[1000, 2000], [3000, 4000]]'`` selects the
        1000-2000 and 3000-4000 source port ranges.

    The return value has the same structure as
    :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
    '''
    # Render the term text for this device's Capirca platform; the term
    # attributes are forwarded verbatim to capirca.get_term_config.
    rendered = __salt__['capirca.get_term_config'](_get_capirca_platform(),
                                                   filter_name,
                                                   term_name,
                                                   filter_options=filter_options or [],
                                                   pillar_key=pillar_key,
                                                   pillarenv=pillarenv,
                                                   saltenv=saltenv,
                                                   merge_pillar=merge_pillar,
                                                   revision_id=revision_id,
                                                   revision_no=revision_no,
                                                   revision_date=revision_date,
                                                   revision_date_format=revision_date_format,
                                                   source_service=source_service,
                                                   destination_service=destination_service,
                                                   **term_fields)
    # Hand the rendered text to NAPALM for the usual load/diff/commit cycle.
    return __salt__['net.load_config'](text=rendered,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
@proxy_napalm_wrap
# pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_policy_config(filters=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of the whole policy.
.. note::
The order of the filters and their terms is very important.
The configuration loaded on the device respects the order
defined in the ``filters`` and/or inside the pillar.
When merging the ``filters`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.flw01' netacl.load_policy_config debug=True
Output Example:
.. code-block:: text
edge01.flw01:
----------
already_configured:
False
comment:
diff:
---
+++
@@ -1228,9 +1228,24 @@
!
+ipv4 access-list my-filter
+ 10 remark my-term
+ 20 deny tcp host 1.2.3.4 eq 1234 any
+ 30 deny udp host 1.2.3.4 eq 1234 any
+ 40 deny tcp host 1.2.3.4 eq 1235 any
+ 50 deny udp host 1.2.3.4 eq 1235 any
+ 60 remark my-other-term
+ 70 permit tcp any range 5678 5680 any
+!
+!
+ipv4 access-list block-icmp
+ 10 remark first-term
+ 20 deny icmp any any
!
loaded_config:
! $Date: 2017/03/22 $
no ipv4 access-list my-filter
ipv4 access-list my-filter
remark my-term
deny tcp host 1.2.3.4 eq 1234 any
deny udp host 1.2.3.4 eq 1234 any
deny tcp host 1.2.3.4 eq 1235 any
deny udp host 1.2.3.4 eq 1235 any
remark my-other-term
permit tcp any range 5678 5680 any
exit
no ipv4 access-list block-icmp
ipv4 access-list block-icmp
remark first-term
deny icmp any any
exit
result:
True
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
acl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
protocol:
- tcp
- udp
source_address: 1.2.3.4
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- block-icmp:
terms:
- first-term:
protocol:
- icmp
action: reject
'''
if not filters:
filters = []
platform = _get_capirca_platform()
policy_config = __salt__['capirca.get_policy_config'](platform,
filters=filters,
prepend=prepend,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=policy_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
def get_filter_pillar(filter_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the filter configuration given its name.
filter_name
The name of the filter.
pillar_key
The root key of the whole policy config.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_filter_pillar'](filter_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
def get_term_pillar(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_term_pillar'](filter_name,
term_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
load_policy_config
|
python
|
def load_policy_config(filters=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of the whole policy.
.. note::
The order of the filters and their terms is very important.
The configuration loaded on the device respects the order
defined in the ``filters`` and/or inside the pillar.
When merging the ``filters`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.flw01' netacl.load_policy_config debug=True
Output Example:
.. code-block:: text
edge01.flw01:
----------
already_configured:
False
comment:
diff:
---
+++
@@ -1228,9 +1228,24 @@
!
+ipv4 access-list my-filter
+ 10 remark my-term
+ 20 deny tcp host 1.2.3.4 eq 1234 any
+ 30 deny udp host 1.2.3.4 eq 1234 any
+ 40 deny tcp host 1.2.3.4 eq 1235 any
+ 50 deny udp host 1.2.3.4 eq 1235 any
+ 60 remark my-other-term
+ 70 permit tcp any range 5678 5680 any
+!
+!
+ipv4 access-list block-icmp
+ 10 remark first-term
+ 20 deny icmp any any
!
loaded_config:
! $Date: 2017/03/22 $
no ipv4 access-list my-filter
ipv4 access-list my-filter
remark my-term
deny tcp host 1.2.3.4 eq 1234 any
deny udp host 1.2.3.4 eq 1234 any
deny tcp host 1.2.3.4 eq 1235 any
deny udp host 1.2.3.4 eq 1235 any
remark my-other-term
permit tcp any range 5678 5680 any
exit
no ipv4 access-list block-icmp
ipv4 access-list block-icmp
remark first-term
deny icmp any any
exit
result:
True
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
acl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
protocol:
- tcp
- udp
source_address: 1.2.3.4
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- block-icmp:
terms:
- first-term:
protocol:
- icmp
action: reject
'''
if not filters:
filters = []
platform = _get_capirca_platform()
policy_config = __salt__['capirca.get_policy_config'](platform,
filters=filters,
prepend=prepend,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=policy_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device)
|
Generate and load the configuration of the whole policy.
.. note::
The order of the filters and their terms is very important.
The configuration loaded on the device respects the order
defined in the ``filters`` and/or inside the pillar.
When merging the ``filters`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.flw01' netacl.load_policy_config debug=True
Output Example:
.. code-block:: text
edge01.flw01:
----------
already_configured:
False
comment:
diff:
---
+++
@@ -1228,9 +1228,24 @@
!
+ipv4 access-list my-filter
+ 10 remark my-term
+ 20 deny tcp host 1.2.3.4 eq 1234 any
+ 30 deny udp host 1.2.3.4 eq 1234 any
+ 40 deny tcp host 1.2.3.4 eq 1235 any
+ 50 deny udp host 1.2.3.4 eq 1235 any
+ 60 remark my-other-term
+ 70 permit tcp any range 5678 5680 any
+!
+!
+ipv4 access-list block-icmp
+ 10 remark first-term
+ 20 deny icmp any any
!
loaded_config:
! $Date: 2017/03/22 $
no ipv4 access-list my-filter
ipv4 access-list my-filter
remark my-term
deny tcp host 1.2.3.4 eq 1234 any
deny udp host 1.2.3.4 eq 1234 any
deny tcp host 1.2.3.4 eq 1235 any
deny udp host 1.2.3.4 eq 1235 any
remark my-other-term
permit tcp any range 5678 5680 any
exit
no ipv4 access-list block-icmp
ipv4 access-list block-icmp
remark first-term
deny icmp any any
exit
result:
True
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
acl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
protocol:
- tcp
- udp
source_address: 1.2.3.4
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- block-icmp:
terms:
- first-term:
protocol:
- icmp
action: reject
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L669-L855
|
[
"def _get_capirca_platform(): # pylint: disable=too-many-return-statements\n '''\n Given the following NAPALM grains, we can determine the Capirca platform name:\n\n - vendor\n - device model\n - operating system\n\n Not the most optimal.\n '''\n vendor = __grains__['vendor'].lower()\n os_ = __grains__['os'].lower()\n model = __grains__['model'].lower()\n if vendor == 'juniper' and 'srx' in model:\n return 'junipersrx'\n elif vendor == 'cisco' and os_ == 'ios':\n return 'cisco'\n elif vendor == 'cisco' and os_ == 'iosxr':\n return 'ciscoxr'\n elif vendor == 'cisco' and os_ == 'asa':\n return 'ciscoasa'\n elif os_ == 'linux':\n return 'iptables'\n elif vendor == 'palo alto networks':\n return 'paloaltofw'\n # anything else will point to the vendor\n # i.e.: some of the Capirca platforms are named by the device vendor\n # e.g.: eOS => arista, junos => juniper, etc.\n return vendor\n"
] |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
'''
This module requires both NAPALM and Capirca.
'''
if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
return (False, 'The netacl (napalm_acl) module cannot be loaded: \
Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_capirca_platform(): # pylint: disable=too-many-return-statements
'''
Given the following NAPALM grains, we can determine the Capirca platform name:
- vendor
- device model
- operating system
Not the most optimal.
'''
vendor = __grains__['vendor'].lower()
os_ = __grains__['os'].lower()
model = __grains__['model'].lower()
if vendor == 'juniper' and 'srx' in model:
return 'junipersrx'
elif vendor == 'cisco' and os_ == 'ios':
return 'cisco'
elif vendor == 'cisco' and os_ == 'iosxr':
return 'ciscoxr'
elif vendor == 'cisco' and os_ == 'asa':
return 'ciscoasa'
elif os_ == 'linux':
return 'iptables'
elif vendor == 'palo alto networks':
return 'paloaltofw'
# anything else will point to the vendor
# i.e.: some of the Capirca platforms are named by the device vendor
# e.g.: eOS => arista, junos => juniper, etc.
return vendor
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
def load_term_config(filter_name,
term_name,
filter_options=None,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
source_service=None,
destination_service=None,
**term_fields):
'''
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value, either a list of values, but also they can select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
'''
if not filter_options:
filter_options = []
platform = _get_capirca_platform()
term_config = __salt__['capirca.get_term_config'](platform,
filter_name,
term_name,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format,
source_service=source_service,
destination_service=destination_service,
**term_fields)
return __salt__['net.load_config'](text=term_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_filter_config(filter_name,
filter_options=None,
terms=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of a policy filter.
.. note::
The order of the terms is very important. The configuration loaded
on the device respects the order defined in the ``terms`` and/or
inside the pillar.
When merging the ``terms`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filter_name
The name of the policy filter.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
terms
List of terms for this policy filter.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
at the beginning, while existing ones will preserve the position. To add the new terms
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the terms fields. Otherwise it will try
to merge also filters fields. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the filter config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the filter configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter my-filter {
+ interface-specific;
+ term my-term {
+ from {
+ source-port [ 1234 1235 ];
+ }
+ then {
+ reject;
+ }
+ }
+ term my-other-term {
+ from {
+ protocol tcp;
+ source-port 5678-5680;
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter my-filter {
interface-specific;
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
result:
True
The filter configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
action: reject
- my-other-term:
source_port:
- - 5678
- 5680
protocol: tcp
action: accept
'''
if not filter_options:
filter_options = []
if not terms:
terms = []
platform = _get_capirca_platform()
filter_config = __salt__['capirca.get_filter_config'](platform,
filter_name,
terms=terms,
prepend=prepend,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=filter_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
# pylint: disable=undefined-variable
def get_filter_pillar(filter_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the filter configuration given its name.
filter_name
The name of the filter.
pillar_key
The root key of the whole policy config.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_filter_pillar'](filter_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
def get_term_pillar(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_term_pillar'](filter_name,
term_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
get_filter_pillar
|
python
|
def get_filter_pillar(filter_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the filter configuration given its name.
filter_name
The name of the filter.
pillar_key
The root key of the whole policy config.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_filter_pillar'](filter_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
|
Helper that can be used inside a state SLS,
in order to get the filter configuration given its name.
filter_name
The name of the filter.
pillar_key
The root key of the whole policy config.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L858-L883
| null |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
'''
This module requires both NAPALM and Capirca.
'''
if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
return (False, 'The netacl (napalm_acl) module cannot be loaded: \
Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_capirca_platform(): # pylint: disable=too-many-return-statements
'''
Given the following NAPALM grains, we can determine the Capirca platform name:
- vendor
- device model
- operating system
Not the most optimal.
'''
vendor = __grains__['vendor'].lower()
os_ = __grains__['os'].lower()
model = __grains__['model'].lower()
if vendor == 'juniper' and 'srx' in model:
return 'junipersrx'
elif vendor == 'cisco' and os_ == 'ios':
return 'cisco'
elif vendor == 'cisco' and os_ == 'iosxr':
return 'ciscoxr'
elif vendor == 'cisco' and os_ == 'asa':
return 'ciscoasa'
elif os_ == 'linux':
return 'iptables'
elif vendor == 'palo alto networks':
return 'paloaltofw'
# anything else will point to the vendor
# i.e.: some of the Capirca platforms are named by the device vendor
# e.g.: eOS => arista, junos => juniper, etc.
return vendor
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
def load_term_config(filter_name,
term_name,
filter_options=None,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
source_service=None,
destination_service=None,
**term_fields):
'''
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value, either a list of values, but also they can select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
'''
if not filter_options:
filter_options = []
platform = _get_capirca_platform()
term_config = __salt__['capirca.get_term_config'](platform,
filter_name,
term_name,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format,
source_service=source_service,
destination_service=destination_service,
**term_fields)
return __salt__['net.load_config'](text=term_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_filter_config(filter_name,
filter_options=None,
terms=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of a policy filter.
.. note::
The order of the terms is very important. The configuration loaded
on the device respects the order defined in the ``terms`` and/or
inside the pillar.
When merging the ``terms`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filter_name
The name of the policy filter.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
terms
List of terms for this policy filter.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
at the beginning, while existing ones will preserve the position. To add the new terms
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the terms fields. Otherwise it will try
to merge also filters fields. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the filter config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the filter configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter my-filter {
+ interface-specific;
+ term my-term {
+ from {
+ source-port [ 1234 1235 ];
+ }
+ then {
+ reject;
+ }
+ }
+ term my-other-term {
+ from {
+ protocol tcp;
+ source-port 5678-5680;
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter my-filter {
interface-specific;
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
result:
True
The filter configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
action: reject
- my-other-term:
source_port:
- - 5678
- 5680
protocol: tcp
action: accept
'''
if not filter_options:
filter_options = []
if not terms:
terms = []
platform = _get_capirca_platform()
filter_config = __salt__['capirca.get_filter_config'](platform,
filter_name,
terms=terms,
prepend=prepend,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=filter_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_policy_config(filters=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of the whole policy.
.. note::
The order of the filters and their terms is very important.
The configuration loaded on the device respects the order
defined in the ``filters`` and/or inside the pillar.
When merging the ``filters`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filters
List of filters for this policy.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of filters generated by merging
the filters from ``filters`` with those defined in the pillar (if any): new filters are prepended
at the beginning, while existing ones will preserve the position. To add the new filters
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the filters and terms fields. Otherwise it will try
to merge everything at the policy level. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the policy config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the policy configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.flw01' netacl.load_policy_config debug=True
Output Example:
.. code-block:: text
edge01.flw01:
----------
already_configured:
False
comment:
diff:
---
+++
@@ -1228,9 +1228,24 @@
!
+ipv4 access-list my-filter
+ 10 remark my-term
+ 20 deny tcp host 1.2.3.4 eq 1234 any
+ 30 deny udp host 1.2.3.4 eq 1234 any
+ 40 deny tcp host 1.2.3.4 eq 1235 any
+ 50 deny udp host 1.2.3.4 eq 1235 any
+ 60 remark my-other-term
+ 70 permit tcp any range 5678 5680 any
+!
+!
+ipv4 access-list block-icmp
+ 10 remark first-term
+ 20 deny icmp any any
!
loaded_config:
! $Date: 2017/03/22 $
no ipv4 access-list my-filter
ipv4 access-list my-filter
remark my-term
deny tcp host 1.2.3.4 eq 1234 any
deny udp host 1.2.3.4 eq 1234 any
deny tcp host 1.2.3.4 eq 1235 any
deny udp host 1.2.3.4 eq 1235 any
remark my-other-term
permit tcp any range 5678 5680 any
exit
no ipv4 access-list block-icmp
ipv4 access-list block-icmp
remark first-term
deny icmp any any
exit
result:
True
The policy configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
acl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
protocol:
- tcp
- udp
source_address: 1.2.3.4
action: reject
- my-other-term:
source_port:
- [5678, 5680]
protocol: tcp
action: accept
- block-icmp:
terms:
- first-term:
protocol:
- icmp
action: reject
'''
if not filters:
filters = []
platform = _get_capirca_platform()
policy_config = __salt__['capirca.get_policy_config'](platform,
filters=filters,
prepend=prepend,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=policy_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
def get_term_pillar(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_term_pillar'](filter_name,
term_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
|
saltstack/salt
|
salt/modules/napalm_netacl.py
|
get_term_pillar
|
python
|
def get_term_pillar(filter_name,
term_name,
pillar_key='acl',
pillarenv=None,
saltenv=None):
'''
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
'''
return __salt__['capirca.get_term_pillar'](filter_name,
term_name,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv)
|
Helper that can be used inside a state SLS,
in order to get the term configuration given its name,
under a certain filter uniquely identified by its name.
filter_name
The name of the filter.
term_name
The name of the term.
pillar_key: ``acl``
The root key of the whole policy config. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_netacl.py#L886-L917
| null |
# -*- coding: utf-8 -*-
'''
NAPALM ACL
==========
Generate and load ACL (firewall) configuration on network devices.
.. versionadded:: 2017.7.0
:codeauthor: Mircea Ulinic <mircea@cloudflare.com>
:maturity: new
:depends: capirca, napalm
:platform: unix
Dependencies
------------
The firewall configuration is generated by Capirca_.
.. _Capirca: https://github.com/google/capirca
To install Capirca, execute: ``pip install capirca``.
To be able to load configuration on network devices,
it requires NAPALM_ library to be installed: ``pip install napalm``.
Please check Installation_ for complete details.
.. _NAPALM: https://napalm.readthedocs.io
.. _Installation: https://napalm.readthedocs.io/en/latest/installation.html
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
log = logging.getLogger(__file__)
# Import third party libs
try:
# pylint: disable=W0611
import capirca
import capirca.aclgen
import capirca.lib.policy
import capirca.lib.aclgenerator
HAS_CAPIRCA = True
# pylint: enable=W0611
except ImportError:
HAS_CAPIRCA = False
# import Salt modules
import salt.utils.napalm
from salt.utils.napalm import proxy_napalm_wrap
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
__virtualname__ = 'netacl'
__proxyenabled__ = ['*']
# allow napalm proxy only
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
def __virtual__():
'''
This module requires both NAPALM and Capirca.
'''
if HAS_CAPIRCA and salt.utils.napalm.virtual(__opts__, __virtualname__, __file__):
return __virtualname__
else:
return (False, 'The netacl (napalm_acl) module cannot be loaded: \
Please install capirca and napalm.')
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _get_capirca_platform(): # pylint: disable=too-many-return-statements
'''
Given the following NAPALM grains, we can determine the Capirca platform name:
- vendor
- device model
- operating system
Not the most optimal.
'''
vendor = __grains__['vendor'].lower()
os_ = __grains__['os'].lower()
model = __grains__['model'].lower()
if vendor == 'juniper' and 'srx' in model:
return 'junipersrx'
elif vendor == 'cisco' and os_ == 'ios':
return 'cisco'
elif vendor == 'cisco' and os_ == 'iosxr':
return 'ciscoxr'
elif vendor == 'cisco' and os_ == 'asa':
return 'ciscoasa'
elif os_ == 'linux':
return 'iptables'
elif vendor == 'palo alto networks':
return 'paloaltofw'
# anything else will point to the vendor
# i.e.: some of the Capirca platforms are named by the device vendor
# e.g.: eOS => arista, junos => juniper, etc.
return vendor
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
@proxy_napalm_wrap
def load_term_config(filter_name,
term_name,
filter_options=None,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
source_service=None,
destination_service=None,
**term_fields):
'''
Generate and load the configuration of a policy term.
filter_name
The name of the policy filter.
term_name
The name of the term.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
If the pillar contains the following structure:
.. code-block:: yaml
firewall:
- my-filter:
terms:
- my-term:
source_port: 1234
source_address:
- 1.2.3.4/32
- 5.6.7.8/32
The ``pillar_key`` field would be specified as ``firewall``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The properties specified through the CLI have higher priority than the pillar.
revision_id
Add a comment in the term config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the term configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
source_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a source_port and protocol.
As this module is available on Unix platforms only,
it reads the IANA_ port assignment from /etc/services.
If the user requires additional shortcuts to be referenced, they can add entries under /etc/services,
which can be managed using the :mod:`file state <salt.states.file>`.
.. _IANA: http://www.iana.org/assignments/port-numbers
destination_service
A special service to choose from. This is a helper so the user is able to
select a source just using the name, instead of specifying a destination_port and protocol.
Allows the same options as ``source_service``.
term_fields
Term attributes. To see what fields are supported, please consult the
list of supported keywords_. Some platforms have a few other optional_
keywords.
.. _keywords: https://github.com/google/capirca/wiki/Policy-format#keywords
.. _optional: https://github.com/google/capirca/wiki/Policy-format#optionally-supported-keywords
.. note::
The following fields are accepted (some being platform-specific):
- action
- address
- address_exclude
- comment
- counter
- expiration
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- log_name
- loss_priority
- option
- policer
- port
- precedence
- principals
- protocol
- protocol_except
- qos
- pan_application
- routing_instance
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- packet_length
- fragment_offset
- hop_limit
- icmp_type
- ether_type
- traffic_class_count
- traffic_type
- translated
- dscp_set
- dscp_match
- dscp_except
- next_ip
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- vpn
- source_tag
- destination_tag
- source_interface
- destination_interface
- flattened
- flattened_addr
- flattened_saddr
- flattened_daddr
- priority
.. note::
The following fields can be also a single value and a list of values:
- action
- address
- address_exclude
- comment
- destination_address
- destination_address_exclude
- destination_port
- destination_prefix
- forwarding_class
- forwarding_class_except
- logging
- option
- port
- precedence
- principals
- protocol
- protocol_except
- pan_application
- source_address
- source_address_exclude
- source_port
- source_prefix
- verbatim
- icmp_type
- ether_type
- traffic_type
- dscp_match
- dscp_except
- flexible_match_range
- source_prefix_except
- destination_prefix_except
- source_tag
- destination_tag
- source_service
- destination_service
Example: ``destination_address`` can be either defined as:
.. code-block:: yaml
destination_address: 172.17.17.1/24
or as a list of destination IP addresses:
.. code-block:: yaml
destination_address:
- 172.17.17.1/24
- 172.17.19.1/24
or a list of services to be matched:
.. code-block:: yaml
source_service:
- ntp
- snmp
- ldap
- bgpd
.. note::
The port fields ``source_port`` and ``destination_port`` can be used as above to select either
a single value, either a list of values, but also they can select port ranges. Example:
.. code-block:: yaml
source_port:
- - 1000
- 2000
- - 3000
- 4000
With the configuration above, the user is able to select the 1000-2000 and 3000-4000 source port ranges.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_term_config filter-name term-name source_address=1.2.3.4 destination_address=5.6.7.8 action=accept test=True debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
Configuration discarded.
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter filter-name {
+ interface-specific;
+ term term-name {
+ from {
+ source-address {
+ 1.2.3.4/32;
+ }
+ destination-address {
+ 5.6.7.8/32;
+ }
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter filter-name {
interface-specific;
term term-name {
from {
source-address {
1.2.3.4/32;
}
destination-address {
5.6.7.8/32;
}
}
then accept;
}
}
}
}
result:
True
'''
if not filter_options:
filter_options = []
platform = _get_capirca_platform()
term_config = __salt__['capirca.get_term_config'](platform,
filter_name,
term_name,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format,
source_service=source_service,
destination_service=destination_service,
**term_fields)
return __salt__['net.load_config'](text=term_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_filter_config(filter_name,
filter_options=None,
terms=None,
prepend=True,
pillar_key='acl',
pillarenv=None,
saltenv=None,
merge_pillar=True,
only_lower_merge=False,
revision_id=None,
revision_no=None,
revision_date=True,
revision_date_format='%Y/%m/%d',
test=False,
commit=True,
debug=False,
**kwargs): # pylint: disable=unused-argument
'''
Generate and load the configuration of a policy filter.
.. note::
The order of the terms is very important. The configuration loaded
on the device respects the order defined in the ``terms`` and/or
inside the pillar.
When merging the ``terms`` with the pillar data, consider the
``prepend`` argument to make sure the order is correct!
filter_name
The name of the policy filter.
filter_options
Additional filter options. These options are platform-specific.
See the complete list of options_.
.. _options: https://github.com/google/capirca/wiki/Policy-format#header-section
terms
List of terms for this policy filter.
If not specified or empty, will try to load the configuration from the pillar,
unless ``merge_pillar`` is set as ``False``.
prepend: ``True``
When ``merge_pillar`` is set as ``True``, the final list of terms generated by merging
the terms from ``terms`` with those defined in the pillar (if any): new terms are prepended
at the beginning, while existing ones will preserve the position. To add the new terms
at the end of the list, set this argument to ``False``.
pillar_key: ``acl``
The key in the pillar containing the default attributes values. Default: ``acl``.
pillarenv
Query the master to generate fresh pillar data on the fly,
specifically from the requested pillar environment.
saltenv
Included only for compatibility with
:conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
merge_pillar: ``True``
Merge the CLI variables with the pillar. Default: ``True``.
The merge logic depends on the ``prepend`` argument and
the CLI has higher priority than the pillar.
only_lower_merge: ``False``
Specify if it should merge only the terms fields. Otherwise it will try
to merge also filters fields. Default: ``False``.
This option requires ``merge_pillar``, otherwise it is ignored.
revision_id
Add a comment in the filter config having the description for the changes applied.
revision_no
The revision count.
revision_date: ``True``
Boolean flag: display the date when the filter configuration was generated. Default: ``True``.
revision_date_format: ``%Y/%m/%d``
The date format to be used when generating the perforce data. Default: ``%Y/%m/%d`` (<year>/<month>/<day>).
test: ``False``
Dry run? If set as ``True``, will apply the config, discard and return the changes.
Default: ``False`` and will commit the changes on the device.
commit: ``True``
Commit? Default: ``True``.
debug: ``False``
Debug mode. Will insert a new key under the output dictionary,
as ``loaded_config`` containing the raw configuration loaded on the device.
The output is a dictionary having the same form as :mod:`net.load_config <salt.modules.napalm_network.load_config>`.
CLI Example:
.. code-block:: bash
salt 'edge01.bjm01' netacl.load_filter_config my-filter pillar_key=netacl debug=True
Output Example:
.. code-block:: jinja
edge01.bjm01:
----------
already_configured:
False
comment:
diff:
[edit firewall]
+ family inet {
+ /*
+ ** $Date: 2017/03/22 $
+ **
+ */
+ filter my-filter {
+ interface-specific;
+ term my-term {
+ from {
+ source-port [ 1234 1235 ];
+ }
+ then {
+ reject;
+ }
+ }
+ term my-other-term {
+ from {
+ protocol tcp;
+ source-port 5678-5680;
+ }
+ then accept;
+ }
+ }
+ }
loaded_config:
firewall {
family inet {
replace:
/*
** $Date: 2017/03/22 $
**
*/
filter my-filter {
interface-specific;
term my-term {
from {
source-port [ 1234 1235 ];
}
then {
reject;
}
}
term my-other-term {
from {
protocol tcp;
source-port 5678-5680;
}
then accept;
}
}
}
}
result:
True
The filter configuration has been loaded from the pillar, having the following structure:
.. code-block:: yaml
netacl:
- my-filter:
terms:
- my-term:
source_port:
- 1234
- 1235
action: reject
- my-other-term:
source_port:
- - 5678
- 5680
protocol: tcp
action: accept
'''
if not filter_options:
filter_options = []
if not terms:
terms = []
platform = _get_capirca_platform()
filter_config = __salt__['capirca.get_filter_config'](platform,
filter_name,
terms=terms,
prepend=prepend,
filter_options=filter_options,
pillar_key=pillar_key,
pillarenv=pillarenv,
saltenv=saltenv,
merge_pillar=merge_pillar,
only_lower_merge=only_lower_merge,
revision_id=revision_id,
revision_no=revision_no,
revision_date=revision_date,
revision_date_format=revision_date_format)
return __salt__['net.load_config'](text=filter_config,
test=test,
commit=commit,
debug=debug,
inherit_napalm_device=napalm_device) # pylint: disable=undefined-variable
@proxy_napalm_wrap
def load_policy_config(filters=None,
                       prepend=True,
                       pillar_key='acl',
                       pillarenv=None,
                       saltenv=None,
                       merge_pillar=True,
                       only_lower_merge=False,
                       revision_id=None,
                       revision_no=None,
                       revision_date=True,
                       revision_date_format='%Y/%m/%d',
                       test=False,
                       commit=True,
                       debug=False,
                       **kwargs):  # pylint: disable=unused-argument
    '''
    Generate and load the configuration of the whole ACL policy on the device.

    .. note::

        The order of the filters and their terms is very important: the
        configuration loaded on the device respects the order defined in
        ``filters`` and/or inside the pillar. When merging ``filters`` with
        the pillar data, mind the ``prepend`` argument to make sure the
        order is correct!

    filters
        List of filters for this policy. If not specified or empty, the
        configuration is read from the pillar, unless ``merge_pillar`` is
        set as ``False``.

    prepend: ``True``
        When ``merge_pillar`` is set as ``True``, new filters are prepended
        at the beginning of the merged list while existing ones preserve
        their position. Set to ``False`` to append the new filters at the
        end instead.

    pillar_key: ``acl``
        The key in the pillar containing the default attributes values.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.

    merge_pillar: ``True``
        Merge the CLI variables with the pillar. The merge logic depends on
        the ``prepend`` argument and the CLI has higher priority than the
        pillar.

    only_lower_merge: ``False``
        Merge only the filters and terms fields instead of everything at
        the policy level. Requires ``merge_pillar``, otherwise ignored.

    revision_id
        Add a comment in the policy config having the description for the
        changes applied.

    revision_no
        The revision count.

    revision_date: ``True``
        Display the date when the policy configuration was generated.

    revision_date_format: ``%Y/%m/%d``
        The date format used when generating the revision data.

    test: ``False``
        Dry run? If set as ``True``, will apply the config, discard and
        return the changes. Default: ``False`` (commits on the device).

    commit: ``True``
        Commit? Default: ``True``.

    debug: ``False``
        Debug mode: insert a ``loaded_config`` key in the output dictionary
        containing the raw configuration loaded on the device. The output
        has the same form as
        :mod:`net.load_config <salt.modules.napalm_network.load_config>`.

    CLI Example:

    .. code-block:: bash

        salt 'edge01.flw01' netacl.load_policy_config debug=True
    '''
    # Collect all the generation-time options for the capirca renderer.
    capirca_kwargs = {
        'filters': filters or [],
        'prepend': prepend,
        'pillar_key': pillar_key,
        'pillarenv': pillarenv,
        'saltenv': saltenv,
        'merge_pillar': merge_pillar,
        'only_lower_merge': only_lower_merge,
        'revision_id': revision_id,
        'revision_no': revision_no,
        'revision_date': revision_date,
        'revision_date_format': revision_date_format,
    }
    # Render the whole policy for the capirca platform matching this device.
    policy_config = __salt__['capirca.get_policy_config'](_get_capirca_platform(),
                                                          **capirca_kwargs)
    # Push the rendered text onto the device through the NAPALM proxy.
    return __salt__['net.load_config'](text=policy_config,
                                       test=test,
                                       commit=commit,
                                       debug=debug,
                                       inherit_napalm_device=napalm_device)  # pylint: disable=undefined-variable
def get_filter_pillar(filter_name,
                      pillar_key='acl',
                      pillarenv=None,
                      saltenv=None):
    '''
    Helper that can be used inside a state SLS, in order to get the filter
    configuration given its name.

    filter_name
        The name of the filter.

    pillar_key
        The root key of the whole policy config.

    pillarenv
        Query the master to generate fresh pillar data on the fly,
        specifically from the requested pillar environment.

    saltenv
        Included only for compatibility with
        :conf_minion:`pillarenv_from_saltenv`, and is otherwise ignored.
    '''
    # Thin pass-through to the capirca helper; all lookup options forwarded.
    lookup_kwargs = {
        'pillar_key': pillar_key,
        'pillarenv': pillarenv,
        'saltenv': saltenv,
    }
    return __salt__['capirca.get_filter_pillar'](filter_name, **lookup_kwargs)
|
saltstack/salt
|
salt/modules/purefb.py
|
_get_blade
|
python
|
def _get_blade():
    '''
    Get Pure Storage FlashBlade configuration

    1) From the minion config
        pure_tags:
          fb:
            san_ip: management vip or hostname for the FlashBlade
            api_token: A valid api token for the FlashBlade being managed
    2) From environment (PUREFB_IP and PUREFB_API)
    3) From the pillar (PUREFB_IP and PUREFB_API)

    Raises CommandExecutionError when no credentials can be found or the
    login fails.
    '''
    try:
        blade_name = __opts__['pure_tags']['fb'].get('san_ip')
        api_token = __opts__['pure_tags']['fb'].get('api_token')
        # BUG FIX: this previously tested an undefined name ``api`` instead of
        # ``api_token``; the resulting NameError was silently swallowed below,
        # so the minion-config path could never succeed.
        if blade_name and api_token:
            blade = PurityFb(blade_name)
            blade.disable_verify_ssl()
        else:
            # Fall through to the next credential source explicitly; without
            # this, incomplete config left ``blade`` unbound.
            raise KeyError
    except (KeyError, NameError, TypeError):
        try:
            blade_name = os.environ.get('PUREFB_IP')
            api_token = os.environ.get('PUREFB_API')
            if blade_name and api_token:
                blade = PurityFb(blade_name)
                blade.disable_verify_ssl()
            else:
                # Environment incomplete: fall through to the pillar.
                raise KeyError
        except (ValueError, KeyError, NameError):
            try:
                api_token = __pillar__['PUREFB_API']
                blade = PurityFb(__pillar__['PUREFB_IP'])
                blade.disable_verify_ssl()
            except (KeyError, NameError):
                raise CommandExecutionError('No Pure Storage FlashBlade credentials found.')
    try:
        blade.login(api_token)
    except Exception:
        raise CommandExecutionError('Pure Storage FlashBlade authentication failed.')
    return blade
|
Get Pure Storage FlashBlade configuration
1) From the minion config
pure_tags:
fb:
san_ip: management vip or hostname for the FlashBlade
api_token: A valid api token for the FlashBlade being managed
2) From environment (PUREFB_IP and PUREFB_API)
3) From the pillar (PUREFB_IP and PUREFB_API)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/purefb.py#L85-L123
| null |
# -*- coding: utf-8 -*-
##
# Copyright 2018 Pure Storage Inc
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Management of Pure Storage FlashBlade
Installation Prerequisites
--------------------------
- You will need the ``purity_fb`` python package in your python installation
path that is running salt.
.. code-block:: bash
pip install purity_fb
- Configure Pure Storage FlashBlade authentication. Use one of the following
three methods.
1) From the minion config
.. code-block:: yaml
pure_tags:
fb:
san_ip: management vip or hostname for the FlashBlade
api_token: A valid api token for the FlashBlade being managed
2) From environment (PUREFB_IP and PUREFB_API)
3) From the pillar (PUREFB_IP and PUREFB_API)
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purity_fb
:platform: all
.. versionadded:: 2019.2.0
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
from datetime import datetime
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
# Import 3rd party modules
try:
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix
from purity_fb import rest, NfsRule, ProtocolRule
HAS_PURITY_FB = True
except ImportError:
HAS_PURITY_FB = False
__docformat__ = 'restructuredtext en'
__virtualname__ = 'purefb'
def __virtual__():
    '''
    Only load this module when the purity_fb library is importable.
    '''
    if not HAS_PURITY_FB:
        return (False, 'purefb execution module not loaded: purity_fb python library not available.')
    return __virtualname__
def _get_fs(name, blade):
'''
Private function to
check for existance of a filesystem
'''
_fs = []
_fs.append(name)
try:
res = blade.file_systems.list_file_systems(names=_fs)
return res.items[0]
except rest.ApiException:
return None
def _get_snapshot(name, suffix, blade):
'''
Return name of Snapshot
or None
'''
try:
filt = 'source=\'{}\' and suffix=\'{}\''.format(name, suffix)
res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
return res.items[0]
except rest.ApiException:
return None
def _get_deleted_fs(name, blade):
    '''
    Private helper: return the filesystem record for ``name`` only when it
    exists and is already destroyed (pending eradication); otherwise None.
    '''
    try:
        fsys = _get_fs(name, blade)
        if fsys and fsys.destroyed:
            return fsys
    except rest.ApiException:
        return None
    # Implicit None: filesystem absent or still live.
def snap_create(name, suffix=None):
    '''
    Create a filesystem snapshot on a Pure Storage FlashBlade.

    Will return False if filesystem selected to snap does not exist.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem to snapshot
    suffix : string
        if specified forces snapshot name suffix. If not specified
        defaults to timestamp.

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.snap_create foo
        salt '*' purefb.snap_create foo suffix=bar
    '''
    blade = _get_blade()
    if suffix is None:
        # Default suffix: seconds since the Unix epoch, dots stripped so
        # the name is a valid snapshot suffix.
        epoch = datetime(1970, 1, 1, 0, 0, 0, 0)
        elapsed = (datetime.utcnow() - epoch).total_seconds()
        suffix = ('snap-' + six.text_type(elapsed)).replace('.', '')
    if _get_fs(name, blade) is None:
        return False
    try:
        blade.file_system_snapshots.create_file_system_snapshots(
            sources=[name],
            suffix=SnapshotSuffix(suffix))
        return True
    except rest.ApiException:
        return False
def snap_delete(name, suffix=None, eradicate=False):
    '''
    Delete a filesystem snapshot on a Pure Storage FlashBlade.

    Will return False if selected snapshot does not exist.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem
    suffix : string
        name of snapshot
    eradicate : boolean
        Eradicate snapshot after deletion if True. Default is False

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.snap_delete foo suffix=snap eradicate=True
    '''
    blade = _get_blade()
    if _get_snapshot(name, suffix, blade) is None:
        return False
    snapname = name + '.' + suffix
    try:
        # Soft-delete: flag the snapshot as destroyed.
        blade.file_system_snapshots.update_file_system_snapshots(
            name=snapname,
            attributes=FileSystemSnapshot(destroyed=True))
    except rest.ApiException:
        return False
    if eradicate is not True:
        return True
    try:
        # Hard-delete the already-destroyed snapshot.
        blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
        return True
    except rest.ApiException:
        return False
def snap_eradicate(name, suffix=None):
    '''
    Eradicate a deleted filesystem snapshot on a Pure Storage FlashBlade.

    Will return False if snapshot is not in a deleted state.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem
    suffix : string
        name of snapshot

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.snap_eradicate foo suffix=snap
    '''
    blade = _get_blade()
    if _get_snapshot(name, suffix, blade) is None:
        return False
    try:
        blade.file_system_snapshots.delete_file_system_snapshots(name=name + '.' + suffix)
        return True
    except rest.ApiException:
        return False
def fs_create(name, size=None, proto='NFS', nfs_rules='*(rw,no_root_squash)', snapshot=False):
    '''
    Create a filesystem on a Pure Storage FlashBlade.

    Will return False if filesystem already exists or the protocol is not
    one of NFS, CIFS or HTTP.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem (truncated to 63 characters)
    proto : string
        (Optional) Sharing protocol (NFS, CIFS or HTTP). If not specified
        default is NFS
    snapshot: boolean
        (Optional) Are snapshots enabled on the filesystem. Default is False
    nfs_rules : string
        (Optional) export rules for NFS. If not specified default is
        ``*(rw,no_root_squash)``. Refer to Pure Storage documentation for
        formatting rules.
    size : string
        if specified capacity of filesystem. If not specified default to 32G.
        Refer to Pure Storage documentation for formatting rules.

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_create foo proto=CIFS
        salt '*' purefb.fs_create foo size=10T
    '''
    # FlashBlade limits filesystem names to 63 characters.
    if len(name) > 63:
        name = name[0:63]
    blade = _get_blade()
    # BUG FIX: removed a leftover debug ``print(proto)`` that wrote to stdout
    # on every call.
    if _get_fs(name, blade) is not None:
        # Filesystem already exists.
        return False
    if size is None:
        size = __utils__['stringutils.human_to_bytes']('32G')
    else:
        size = __utils__['stringutils.human_to_bytes'](size)
    # Map the requested protocol onto the FileSystem keyword it enables.
    proto = proto.lower()
    if proto == 'nfs':
        proto_kwargs = {'nfs': NfsRule(enabled=True, rules=nfs_rules)}
    elif proto == 'cifs':
        proto_kwargs = {'smb': ProtocolRule(enabled=True)}
    elif proto == 'http':
        proto_kwargs = {'http': ProtocolRule(enabled=True)}
    else:
        # Unsupported protocol.
        return False
    fs_obj = FileSystem(name=name,
                        provisioned=size,
                        fast_remove_directory_enabled=True,
                        snapshot_directory_enabled=snapshot,
                        **proto_kwargs)
    try:
        blade.file_systems.create_file_systems(fs_obj)
        return True
    except rest.ApiException:
        return False
def fs_delete(name, eradicate=False):
    '''
    Delete a share on a Pure Storage FlashBlade.

    Will return False if filesystem doesn't exist or is already in a
    deleted state.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem
    eradicate : boolean
        (Optional) Eradicate filesystem after deletion if True. Default is False

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_delete foo eradicate=True
    '''
    blade = _get_blade()
    if _get_fs(name, blade) is None:
        return False
    try:
        # Disable every protocol and soft-delete the filesystem.
        blade.file_systems.update_file_systems(
            name=name,
            attributes=FileSystem(nfs=NfsRule(enabled=False),
                                  smb=ProtocolRule(enabled=False),
                                  http=ProtocolRule(enabled=False),
                                  destroyed=True))
    except rest.ApiException:
        return False
    if eradicate is not True:
        return True
    try:
        # Hard-delete the already-destroyed filesystem.
        blade.file_systems.delete_file_systems(name)
        return True
    except rest.ApiException:
        return False
def fs_eradicate(name):
    '''
    Eradicate a deleted filesystem on a Pure Storage FlashBlade.

    Will return False if filesystem is not in a deleted state.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_eradicate foo
    '''
    blade = _get_blade()
    if _get_deleted_fs(name, blade) is None:
        return False
    try:
        blade.file_systems.delete_file_systems(name)
        return True
    except rest.ApiException:
        return False
def fs_extend(name, size):
    '''
    Resize an existing filesystem on a Pure Storage FlashBlade.

    Will return False if new size is less than or equal to existing size.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem
    size : string
        New capacity of filesystem.
        Refer to Pure Storage documentation for formatting rules.

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_extend foo 10T
    '''
    blade = _get_blade()
    fsys = _get_fs(name, blade)
    if fsys is None:
        return False
    new_size = __utils__['stringutils.human_to_bytes'](size)
    # Only grow: a shrink (or no-op) is rejected.
    if new_size <= fsys.provisioned:
        return False
    try:
        blade.file_systems.update_file_systems(name=name,
                                               attributes=FileSystem(provisioned=new_size))
        return True
    except rest.ApiException:
        return False
def fs_update(name, rules, snapshot=False):
    '''
    Update filesystem on a Pure Storage FlashBlade.

    Allows for change of NFS export rules and enabling/disabling
    of snapshotting capability.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem
    rules : string
        NFS export rules for filesystem
        Refer to Pure Storage documentation for formatting rules.
    snapshot: boolean
        (Optional) Enable/Disable snapshots on the filesystem. Default is False

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_nfs_update foo rules='10.234.112.23(ro), 10.234.112.24(rw)' snapshot=True
    '''
    blade = _get_blade()
    fsys = _get_fs(name, blade)
    if fsys is None:
        return False
    new_attrs = {'snapshot_directory_enabled': snapshot}
    try:
        # Export rules are only updated when NFS is enabled on the share.
        if fsys.nfs.enabled:
            new_attrs['nfs'] = NfsRule(rules=rules)
        blade.file_systems.update_file_systems(name=name,
                                               attributes=FileSystem(**new_attrs))
        return True
    except rest.ApiException:
        return False
|
saltstack/salt
|
salt/modules/purefb.py
|
_get_fs
|
python
|
def _get_fs(name, blade):
'''
Private function to
check for existance of a filesystem
'''
_fs = []
_fs.append(name)
try:
res = blade.file_systems.list_file_systems(names=_fs)
return res.items[0]
except rest.ApiException:
return None
|
Private function to
check for existence of a filesystem
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/purefb.py#L126-L137
| null |
# -*- coding: utf-8 -*-
##
# Copyright 2018 Pure Storage Inc
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Management of Pure Storage FlashBlade
Installation Prerequisites
--------------------------
- You will need the ``purity_fb`` python package in your python installation
path that is running salt.
.. code-block:: bash
pip install purity_fb
- Configure Pure Storage FlashBlade authentication. Use one of the following
three methods.
1) From the minion config
.. code-block:: yaml
pure_tags:
fb:
san_ip: management vip or hostname for the FlashBlade
api_token: A valid api token for the FlashBlade being managed
2) From environment (PUREFB_IP and PUREFB_API)
3) From the pillar (PUREFB_IP and PUREFB_API)
:maintainer: Simon Dodsley (simon@purestorage.com)
:maturity: new
:requires: purity_fb
:platform: all
.. versionadded:: 2019.2.0
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
from datetime import datetime
# Import Salt libs
from salt.ext import six
from salt.exceptions import CommandExecutionError
# Import 3rd party modules
try:
from purity_fb import PurityFb, FileSystem, FileSystemSnapshot, SnapshotSuffix
from purity_fb import rest, NfsRule, ProtocolRule
HAS_PURITY_FB = True
except ImportError:
HAS_PURITY_FB = False
__docformat__ = 'restructuredtext en'
__virtualname__ = 'purefb'
def __virtual__():
'''
Determine whether or not to load this module
'''
if HAS_PURITY_FB:
return __virtualname__
return (False, 'purefb execution module not loaded: purity_fb python library not available.')
def _get_blade():
    '''
    Get Pure Storage FlashBlade configuration

    1) From the minion config
        pure_tags:
          fb:
            san_ip: management vip or hostname for the FlashBlade
            api_token: A valid api token for the FlashBlade being managed
    2) From environment (PUREFB_IP and PUREFB_API)
    3) From the pillar (PUREFB_IP and PUREFB_API)

    Raises CommandExecutionError when no credentials can be found or the
    login fails.
    '''
    try:
        blade_name = __opts__['pure_tags']['fb'].get('san_ip')
        api_token = __opts__['pure_tags']['fb'].get('api_token')
        # BUG FIX: this previously tested an undefined name ``api`` instead of
        # ``api_token``; the resulting NameError was silently swallowed below,
        # so the minion-config path could never succeed.
        if blade_name and api_token:
            blade = PurityFb(blade_name)
            blade.disable_verify_ssl()
        else:
            # Fall through to the next credential source explicitly; without
            # this, incomplete config left ``blade`` unbound.
            raise KeyError
    except (KeyError, NameError, TypeError):
        try:
            blade_name = os.environ.get('PUREFB_IP')
            api_token = os.environ.get('PUREFB_API')
            if blade_name and api_token:
                blade = PurityFb(blade_name)
                blade.disable_verify_ssl()
            else:
                # Environment incomplete: fall through to the pillar.
                raise KeyError
        except (ValueError, KeyError, NameError):
            try:
                api_token = __pillar__['PUREFB_API']
                blade = PurityFb(__pillar__['PUREFB_IP'])
                blade.disable_verify_ssl()
            except (KeyError, NameError):
                raise CommandExecutionError('No Pure Storage FlashBlade credentials found.')
    try:
        blade.login(api_token)
    except Exception:
        raise CommandExecutionError('Pure Storage FlashBlade authentication failed.')
    return blade
def _get_snapshot(name, suffix, blade):
'''
Return name of Snapshot
or None
'''
try:
filt = 'source=\'{}\' and suffix=\'{}\''.format(name, suffix)
res = blade.file_system_snapshots.list_file_system_snapshots(filter=filt)
return res.items[0]
except rest.ApiException:
return None
def _get_deleted_fs(name, blade):
'''
Private function to check
if a file systeem has already been deleted
'''
try:
_fs = _get_fs(name, blade)
if _fs and _fs.destroyed:
return _fs
except rest.ApiException:
return None
def snap_create(name, suffix=None):
'''
Create a filesystem snapshot on a Pure Storage FlashBlade.
Will return False if filesystem selected to snap does not exist.
.. versionadded:: 2019.2.0
name : string
name of filesystem to snapshot
suffix : string
if specificed forces snapshot name suffix. If not specified defaults to timestamp.
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_create foo
salt '*' purefb.snap_create foo suffix=bar
'''
blade = _get_blade()
if suffix is None:
suffix = ('snap-' +
six.text_type((datetime.utcnow() -
datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds()))
suffix = suffix.replace('.', '')
if _get_fs(name, blade) is not None:
try:
source = []
source.append(name)
blade.file_system_snapshots.create_file_system_snapshots(sources=source,
suffix=SnapshotSuffix(suffix))
return True
except rest.ApiException:
return False
else:
return False
def snap_delete(name, suffix=None, eradicate=False):
'''
Delete a filesystem snapshot on a Pure Storage FlashBlade.
Will return False if selected snapshot does not exist.
.. versionadded:: 2019.2.0
name : string
name of filesystem
suffix : string
name of snapshot
eradicate : boolean
Eradicate snapshot after deletion if True. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_delete foo suffix=snap eradicate=True
'''
blade = _get_blade()
if _get_snapshot(name, suffix, blade) is not None:
try:
snapname = name + '.' + suffix
new_attr = FileSystemSnapshot(destroyed=True)
blade.file_system_snapshots.update_file_system_snapshots(name=snapname,
attributes=new_attr)
except rest.ApiException:
return False
if eradicate is True:
try:
blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
return True
except rest.ApiException:
return False
else:
return True
else:
return False
def snap_eradicate(name, suffix=None):
'''
Eradicate a deleted filesystem snapshot on a Pure Storage FlashBlade.
Will return False if snapshot is not in a deleted state.
.. versionadded:: 2019.2.0
name : string
name of filesystem
suffix : string
name of snapshot
CLI Example:
.. code-block:: bash
salt '*' purefb.snap_eradicate foo suffix=snap
'''
blade = _get_blade()
if _get_snapshot(name, suffix, blade) is not None:
snapname = name + '.' + suffix
try:
blade.file_system_snapshots.delete_file_system_snapshots(name=snapname)
return True
except rest.ApiException:
return False
else:
return False
def fs_create(name, size=None, proto='NFS', nfs_rules='*(rw,no_root_squash)', snapshot=False):
    '''
    Create a filesystem on a Pure Storage FlashBlade.

    Will return False if filesystem already exists or the protocol is not
    one of NFS, CIFS or HTTP.

    .. versionadded:: 2019.2.0

    name : string
        name of filesystem (truncated to 63 characters)
    proto : string
        (Optional) Sharing protocol (NFS, CIFS or HTTP). If not specified
        default is NFS
    snapshot: boolean
        (Optional) Are snapshots enabled on the filesystem. Default is False
    nfs_rules : string
        (Optional) export rules for NFS. If not specified default is
        ``*(rw,no_root_squash)``. Refer to Pure Storage documentation for
        formatting rules.
    size : string
        if specified capacity of filesystem. If not specified default to 32G.
        Refer to Pure Storage documentation for formatting rules.

    CLI Example:

    .. code-block:: bash

        salt '*' purefb.fs_create foo proto=CIFS
        salt '*' purefb.fs_create foo size=10T
    '''
    # FlashBlade limits filesystem names to 63 characters.
    if len(name) > 63:
        name = name[0:63]
    blade = _get_blade()
    # BUG FIX: removed a leftover debug ``print(proto)`` that wrote to stdout
    # on every call.
    if _get_fs(name, blade) is not None:
        # Filesystem already exists.
        return False
    if size is None:
        size = __utils__['stringutils.human_to_bytes']('32G')
    else:
        size = __utils__['stringutils.human_to_bytes'](size)
    # Map the requested protocol onto the FileSystem keyword it enables.
    proto = proto.lower()
    if proto == 'nfs':
        proto_kwargs = {'nfs': NfsRule(enabled=True, rules=nfs_rules)}
    elif proto == 'cifs':
        proto_kwargs = {'smb': ProtocolRule(enabled=True)}
    elif proto == 'http':
        proto_kwargs = {'http': ProtocolRule(enabled=True)}
    else:
        # Unsupported protocol.
        return False
    fs_obj = FileSystem(name=name,
                        provisioned=size,
                        fast_remove_directory_enabled=True,
                        snapshot_directory_enabled=snapshot,
                        **proto_kwargs)
    try:
        blade.file_systems.create_file_systems(fs_obj)
        return True
    except rest.ApiException:
        return False
def fs_delete(name, eradicate=False):
'''
Delete a share on a Pure Storage FlashBlade.
Will return False if filesystem doesn't exist or is already in a deleted state.
.. versionadded:: 2019.2.0
name : string
name of filesystem
eradicate : boolean
(Optional) Eradicate filesystem after deletion if True. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_delete foo eradicate=True
'''
blade = _get_blade()
if _get_fs(name, blade) is not None:
try:
blade.file_systems.update_file_systems(name=name,
attributes=FileSystem(nfs=NfsRule(enabled=False),
smb=ProtocolRule(enabled=False),
http=ProtocolRule(enabled=False),
destroyed=True)
)
except rest.ApiException:
return False
if eradicate is True:
try:
blade.file_systems.delete_file_systems(name)
return True
except rest.ApiException:
return False
else:
return True
else:
return False
def fs_eradicate(name):
'''
Eradicate a deleted filesystem on a Pure Storage FlashBlade.
Will return False is filesystem is not in a deleted state.
.. versionadded:: 2019.2.0
name : string
name of filesystem
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_eradicate foo
'''
blade = _get_blade()
if _get_deleted_fs(name, blade) is not None:
try:
blade.file_systems.delete_file_systems(name)
return True
except rest.ApiException:
return False
else:
return False
def fs_extend(name, size):
'''
Resize an existing filesystem on a Pure Storage FlashBlade.
Will return False if new size is less than or equal to existing size.
.. versionadded:: 2019.2.0
name : string
name of filesystem
size : string
New capacity of filesystem.
Refer to Pure Storage documentation for formatting rules.
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_extend foo 10T
'''
attr = {}
blade = _get_blade()
_fs = _get_fs(name, blade)
if _fs is not None:
if __utils__['stringutils.human_to_bytes'](size) > _fs.provisioned:
try:
attr['provisioned'] = __utils__['stringutils.human_to_bytes'](size)
n_attr = FileSystem(**attr)
blade.file_systems.update_file_systems(name=name, attributes=n_attr)
return True
except rest.ApiException:
return False
else:
return False
else:
return False
def fs_update(name, rules, snapshot=False):
'''
Update filesystem on a Pure Storage FlashBlade.
Allows for change of NFS export rules and enabling/disabled
of snapshotting capability.
.. versionadded:: 2019.2.0
name : string
name of filesystem
rules : string
NFS export rules for filesystem
Refer to Pure Storage documentation for formatting rules.
snapshot: boolean
(Optional) Enable/Disable snapshots on the filesystem. Default is False
CLI Example:
.. code-block:: bash
salt '*' purefb.fs_nfs_update foo rules='10.234.112.23(ro), 10.234.112.24(rw)' snapshot=True
'''
blade = _get_blade()
attr = {}
_fs = _get_fs(name, blade)
if _fs is not None:
try:
if _fs.nfs.enabled:
attr['nfs'] = NfsRule(rules=rules)
attr['snapshot_directory_enabled'] = snapshot
n_attr = FileSystem(**attr)
blade.file_systems.update_file_systems(name=name, attributes=n_attr)
return True
except rest.ApiException:
return False
else:
return False
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.