repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/returners/redis_return.py
|
get_jids
|
python
|
def get_jids():
'''
Return a dict mapping all job ids to job information
'''
serv = _get_serv(ret=None)
ret = {}
for s in serv.mget(serv.keys('load:*')):
if s is None:
continue
load = salt.utils.json.loads(s)
jid = load['jid']
ret[jid] = salt.utils.jid.format_jid_instance(jid, load)
return ret
|
Return a dict mapping all job ids to job information
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/redis_return.py#L277-L289
|
[
"def _get_serv(ret=None):\n '''\n Return a redis server object\n '''\n _options = _get_options(ret)\n global REDIS_POOL\n if REDIS_POOL:\n return REDIS_POOL\n elif _options.get('cluster_mode'):\n REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),\n skip_full_coverage_check=_options.get('skip_full_coverage_check'),\n decode_responses=True)\n else:\n REDIS_POOL = redis.StrictRedis(host=_options.get('host'),\n port=_options.get('port'),\n unix_socket_path=_options.get('unix_socket_path', None),\n db=_options.get('db'),\n decode_responses=True,\n password=_options.get('password'))\n return REDIS_POOL\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to a redis server
To enable this returner the minion will need the python client for redis
installed and the following values configured in the minion or master
config, these are the defaults:
.. code-block:: yaml
redis.db: '0'
redis.host: 'salt'
redis.port: 6379
redis.password: ''
.. versionadded:: 2018.3.1
Alternatively a UNIX socket can be specified by `unix_socket_path`:
.. code-block:: yaml
redis.db: '0'
redis.unix_socket_path: /var/run/redis/redis.sock
Cluster Mode Example:
.. code-block:: yaml
redis.db: '0'
redis.cluster_mode: true
redis.cluster.skip_full_coverage_check: true
redis.cluster.startup_nodes:
- host: redis-member-1
port: 6379
- host: redis-member-2
port: 6379
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.redis.db: '0'
alternative.redis.host: 'salt'
alternative.redis.port: 6379
alternative.redis.password: ''
To use the redis returner, append '--return redis' to the salt command.
.. code-block:: bash
salt '*' test.ping --return redis
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return redis --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return redis --return_kwargs '{"db": "another-salt"}'
Redis Cluster Mode Options:
cluster_mode: ``False``
Whether cluster_mode is enabled or not
cluster.startup_nodes:
A list of host, port dictionaries pointing to cluster members. At least one is required
but multiple nodes are better
.. code-block:: yaml
cache.redis.cluster.startup_nodes
- host: redis-member-1
port: 6379
- host: redis-member-2
port: 6379
cluster.skip_full_coverage_check: ``False``
Some cluster providers restrict certain redis commands such as CONFIG for enhanced security.
Set this option to true to skip checks that required advanced privileges.
.. note::
Most cloud hosted redis clusters will require this to be set to ``True``
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.returners
import salt.utils.jid
import salt.utils.json
import salt.utils.platform
# Import 3rd-party libs
from salt.ext import six
try:
import redis
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
log = logging.getLogger(__name__)
try:
from rediscluster import StrictRedisCluster
HAS_REDIS_CLUSTER = True
except ImportError:
HAS_REDIS_CLUSTER = False
REDIS_POOL = None
# Define the module's virtual name
__virtualname__ = 'redis'
def __virtual__():
'''
The redis library must be installed for this module to work.
The redis redis cluster library must be installed if cluster_mode is True
'''
if not HAS_REDIS:
return False, 'Could not import redis returner; ' \
'redis python client is not installed.'
if not HAS_REDIS_CLUSTER and _get_options().get('cluster_mode', False):
return (False, "Please install the redis-py-cluster package.")
return __virtualname__
def _get_options(ret=None):
'''
Get the redis options from salt.
'''
attrs = {'host': 'host',
'port': 'port',
'unix_socket_path': 'unix_socket_path',
'db': 'db',
'password': 'password',
'cluster_mode': 'cluster_mode',
'startup_nodes': 'cluster.startup_nodes',
'skip_full_coverage_check': 'cluster.skip_full_coverage_check',
}
if salt.utils.platform.is_proxy():
return {
'host': __opts__.get('redis.host', 'salt'),
'port': __opts__.get('redis.port', 6379),
'unix_socket_path': __opts__.get('redis.unix_socket_path', None),
'db': __opts__.get('redis.db', '0'),
'password': __opts__.get('redis.password', ''),
'cluster_mode': __opts__.get('redis.cluster_mode', False),
'startup_nodes': __opts__.get('redis.cluster.startup_nodes', {}),
'skip_full_coverage_check': __opts__.get('redis.cluster.skip_full_coverage_check', False)
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_serv(ret=None):
'''
Return a redis server object
'''
_options = _get_options(ret)
global REDIS_POOL
if REDIS_POOL:
return REDIS_POOL
elif _options.get('cluster_mode'):
REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),
skip_full_coverage_check=_options.get('skip_full_coverage_check'),
decode_responses=True)
else:
REDIS_POOL = redis.StrictRedis(host=_options.get('host'),
port=_options.get('port'),
unix_socket_path=_options.get('unix_socket_path', None),
db=_options.get('db'),
decode_responses=True,
password=_options.get('password'))
return REDIS_POOL
def _get_ttl():
return __opts__.get('keep_jobs', 24) * 3600
def returner(ret):
'''
Return data to a redis data store
'''
serv = _get_serv(ret)
pipeline = serv.pipeline(transaction=False)
minion, jid = ret['id'], ret['jid']
pipeline.hset('ret:{0}'.format(jid), minion, salt.utils.json.dumps(ret))
pipeline.expire('ret:{0}'.format(jid), _get_ttl())
pipeline.set('{0}:{1}'.format(minion, ret['fun']), jid)
pipeline.sadd('minions', minion)
pipeline.execute()
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
serv = _get_serv(ret=None)
serv.setex('load:{0}'.format(jid), _get_ttl(), salt.utils.json.dumps(load))
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
serv = _get_serv(ret=None)
data = serv.get('load:{0}'.format(jid))
if data:
return salt.utils.json.loads(data)
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
serv = _get_serv(ret=None)
ret = {}
for minion, data in six.iteritems(serv.hgetall('ret:{0}'.format(jid))):
if data:
ret[minion] = salt.utils.json.loads(data)
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
ret = {}
for minion in serv.smembers('minions'):
ind_str = '{0}:{1}'.format(minion, fun)
try:
jid = serv.get(ind_str)
except Exception:
continue
if not jid:
continue
data = serv.get('{0}:{1}'.format(minion, jid))
if data:
ret[minion] = salt.utils.json.loads(data)
return ret
def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
return list(serv.smembers('minions'))
def clean_old_jobs():
'''
Clean out minions's return data for old jobs.
Normally, hset 'ret:<jid>' are saved with a TTL, and will eventually
get cleaned by redis.But for jobs with some very late minion return, the
corresponding hset's TTL will be refreshed to a too late timestamp, we'll
do manually cleaning here.
'''
serv = _get_serv(ret=None)
ret_jids = serv.keys('ret:*')
living_jids = set(serv.keys('load:*'))
to_remove = []
for ret_key in ret_jids:
load_key = ret_key.replace('ret:', 'load:', 1)
if load_key not in living_jids:
to_remove.append(ret_key)
if to_remove:
serv.delete(*to_remove)
log.debug('clean old jobs: %s', to_remove)
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/returners/redis_return.py
|
clean_old_jobs
|
python
|
def clean_old_jobs():
'''
Clean out minions's return data for old jobs.
Normally, hset 'ret:<jid>' are saved with a TTL, and will eventually
get cleaned by redis.But for jobs with some very late minion return, the
corresponding hset's TTL will be refreshed to a too late timestamp, we'll
do manually cleaning here.
'''
serv = _get_serv(ret=None)
ret_jids = serv.keys('ret:*')
living_jids = set(serv.keys('load:*'))
to_remove = []
for ret_key in ret_jids:
load_key = ret_key.replace('ret:', 'load:', 1)
if load_key not in living_jids:
to_remove.append(ret_key)
if to_remove:
serv.delete(*to_remove)
log.debug('clean old jobs: %s', to_remove)
|
Clean out minions's return data for old jobs.
Normally, hset 'ret:<jid>' are saved with a TTL, and will eventually
get cleaned by redis.But for jobs with some very late minion return, the
corresponding hset's TTL will be refreshed to a too late timestamp, we'll
do manually cleaning here.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/redis_return.py#L300-L319
|
[
"def _get_serv(ret=None):\n '''\n Return a redis server object\n '''\n _options = _get_options(ret)\n global REDIS_POOL\n if REDIS_POOL:\n return REDIS_POOL\n elif _options.get('cluster_mode'):\n REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),\n skip_full_coverage_check=_options.get('skip_full_coverage_check'),\n decode_responses=True)\n else:\n REDIS_POOL = redis.StrictRedis(host=_options.get('host'),\n port=_options.get('port'),\n unix_socket_path=_options.get('unix_socket_path', None),\n db=_options.get('db'),\n decode_responses=True,\n password=_options.get('password'))\n return REDIS_POOL\n"
] |
# -*- coding: utf-8 -*-
'''
Return data to a redis server
To enable this returner the minion will need the python client for redis
installed and the following values configured in the minion or master
config, these are the defaults:
.. code-block:: yaml
redis.db: '0'
redis.host: 'salt'
redis.port: 6379
redis.password: ''
.. versionadded:: 2018.3.1
Alternatively a UNIX socket can be specified by `unix_socket_path`:
.. code-block:: yaml
redis.db: '0'
redis.unix_socket_path: /var/run/redis/redis.sock
Cluster Mode Example:
.. code-block:: yaml
redis.db: '0'
redis.cluster_mode: true
redis.cluster.skip_full_coverage_check: true
redis.cluster.startup_nodes:
- host: redis-member-1
port: 6379
- host: redis-member-2
port: 6379
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location:
.. code-block:: yaml
alternative.redis.db: '0'
alternative.redis.host: 'salt'
alternative.redis.port: 6379
alternative.redis.password: ''
To use the redis returner, append '--return redis' to the salt command.
.. code-block:: bash
salt '*' test.ping --return redis
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return redis --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return redis --return_kwargs '{"db": "another-salt"}'
Redis Cluster Mode Options:
cluster_mode: ``False``
Whether cluster_mode is enabled or not
cluster.startup_nodes:
A list of host, port dictionaries pointing to cluster members. At least one is required
but multiple nodes are better
.. code-block:: yaml
cache.redis.cluster.startup_nodes
- host: redis-member-1
port: 6379
- host: redis-member-2
port: 6379
cluster.skip_full_coverage_check: ``False``
Some cluster providers restrict certain redis commands such as CONFIG for enhanced security.
Set this option to true to skip checks that required advanced privileges.
.. note::
Most cloud hosted redis clusters will require this to be set to ``True``
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt libs
import salt.returners
import salt.utils.jid
import salt.utils.json
import salt.utils.platform
# Import 3rd-party libs
from salt.ext import six
try:
import redis
HAS_REDIS = True
except ImportError:
HAS_REDIS = False
log = logging.getLogger(__name__)
try:
from rediscluster import StrictRedisCluster
HAS_REDIS_CLUSTER = True
except ImportError:
HAS_REDIS_CLUSTER = False
REDIS_POOL = None
# Define the module's virtual name
__virtualname__ = 'redis'
def __virtual__():
'''
The redis library must be installed for this module to work.
The redis redis cluster library must be installed if cluster_mode is True
'''
if not HAS_REDIS:
return False, 'Could not import redis returner; ' \
'redis python client is not installed.'
if not HAS_REDIS_CLUSTER and _get_options().get('cluster_mode', False):
return (False, "Please install the redis-py-cluster package.")
return __virtualname__
def _get_options(ret=None):
'''
Get the redis options from salt.
'''
attrs = {'host': 'host',
'port': 'port',
'unix_socket_path': 'unix_socket_path',
'db': 'db',
'password': 'password',
'cluster_mode': 'cluster_mode',
'startup_nodes': 'cluster.startup_nodes',
'skip_full_coverage_check': 'cluster.skip_full_coverage_check',
}
if salt.utils.platform.is_proxy():
return {
'host': __opts__.get('redis.host', 'salt'),
'port': __opts__.get('redis.port', 6379),
'unix_socket_path': __opts__.get('redis.unix_socket_path', None),
'db': __opts__.get('redis.db', '0'),
'password': __opts__.get('redis.password', ''),
'cluster_mode': __opts__.get('redis.cluster_mode', False),
'startup_nodes': __opts__.get('redis.cluster.startup_nodes', {}),
'skip_full_coverage_check': __opts__.get('redis.cluster.skip_full_coverage_check', False)
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_serv(ret=None):
'''
Return a redis server object
'''
_options = _get_options(ret)
global REDIS_POOL
if REDIS_POOL:
return REDIS_POOL
elif _options.get('cluster_mode'):
REDIS_POOL = StrictRedisCluster(startup_nodes=_options.get('startup_nodes'),
skip_full_coverage_check=_options.get('skip_full_coverage_check'),
decode_responses=True)
else:
REDIS_POOL = redis.StrictRedis(host=_options.get('host'),
port=_options.get('port'),
unix_socket_path=_options.get('unix_socket_path', None),
db=_options.get('db'),
decode_responses=True,
password=_options.get('password'))
return REDIS_POOL
def _get_ttl():
return __opts__.get('keep_jobs', 24) * 3600
def returner(ret):
'''
Return data to a redis data store
'''
serv = _get_serv(ret)
pipeline = serv.pipeline(transaction=False)
minion, jid = ret['id'], ret['jid']
pipeline.hset('ret:{0}'.format(jid), minion, salt.utils.json.dumps(ret))
pipeline.expire('ret:{0}'.format(jid), _get_ttl())
pipeline.set('{0}:{1}'.format(minion, ret['fun']), jid)
pipeline.sadd('minions', minion)
pipeline.execute()
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid
'''
serv = _get_serv(ret=None)
serv.setex('load:{0}'.format(jid), _get_ttl(), salt.utils.json.dumps(load))
def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
serv = _get_serv(ret=None)
data = serv.get('load:{0}'.format(jid))
if data:
return salt.utils.json.loads(data)
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
serv = _get_serv(ret=None)
ret = {}
for minion, data in six.iteritems(serv.hgetall('ret:{0}'.format(jid))):
if data:
ret[minion] = salt.utils.json.loads(data)
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
ret = {}
for minion in serv.smembers('minions'):
ind_str = '{0}:{1}'.format(minion, fun)
try:
jid = serv.get(ind_str)
except Exception:
continue
if not jid:
continue
data = serv.get('{0}:{1}'.format(minion, jid))
if data:
ret[minion] = salt.utils.json.loads(data)
return ret
def get_jids():
'''
Return a dict mapping all job ids to job information
'''
serv = _get_serv(ret=None)
ret = {}
for s in serv.mget(serv.keys('load:*')):
if s is None:
continue
load = salt.utils.json.loads(s)
jid = load['jid']
ret[jid] = salt.utils.jid.format_jid_instance(jid, load)
return ret
def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
return list(serv.smembers('minions'))
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
|
saltstack/salt
|
salt/cli/api.py
|
SaltAPI.prepare
|
python
|
def prepare(self):
'''
Run the preparation sequence required to start a salt-api daemon.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(SaltAPI, self).prepare()
try:
if self.config['verify_env']:
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith(('tcp://',
'udp://',
'file://')):
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
self.setup_logfile_logger()
verify_log(self.config)
log.info('Setting up the Salt API')
self.api = salt.client.netapi.NetapiClient(self.config)
self.daemonize_if_required()
self.set_pidfile()
|
Run the preparation sequence required to start a salt-api daemon.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/api.py#L28-L56
|
[
"def verify_files(files, user):\n '''\n Verify that the named files exist and are owned by the named user\n '''\n if salt.utils.platform.is_windows():\n return True\n import pwd # after confirming not running Windows\n try:\n pwnam = pwd.getpwnam(user)\n uid = pwnam[2]\n except KeyError:\n err = ('Failed to prepare the Salt environment for user '\n '{0}. The user is not available.\\n').format(user)\n sys.stderr.write(err)\n sys.exit(salt.defaults.exitcodes.EX_NOUSER)\n\n for fn_ in files:\n dirname = os.path.dirname(fn_)\n try:\n if dirname:\n try:\n os.makedirs(dirname)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise\n if not os.path.isfile(fn_):\n with salt.utils.files.fopen(fn_, 'w'):\n pass\n\n except IOError as err:\n if os.path.isfile(dirname):\n msg = 'Failed to create path {0}, is {1} a file?'.format(fn_, dirname)\n raise SaltSystemExit(msg=msg)\n if err.errno != errno.EACCES:\n raise\n msg = 'No permissions to access \"{0}\", are you running as the correct user?'.format(fn_)\n raise SaltSystemExit(msg=msg)\n\n except OSError as err:\n msg = 'Failed to create path \"{0}\" - {1}'.format(fn_, err)\n raise SaltSystemExit(msg=msg)\n\n stats = os.stat(fn_)\n if uid != stats.st_uid:\n try:\n os.chown(fn_, uid, -1)\n except OSError:\n pass\n return True\n",
"def verify_log(opts):\n '''\n If an insecre logging configuration is found, show a warning\n '''\n level = LOG_LEVELS.get(str(opts.get('log_level')).lower(), logging.NOTSET)\n\n if level < logging.INFO:\n log.warning('Insecure logging configuration detected! Sensitive data may be logged.')\n",
"def shutdown(self, exitcode=0, exitmsg=None):\n '''\n If sub-classed, run any shutdown operations on this method.\n '''\n log.info('The salt-api is shutting down..')\n msg = 'The salt-api is shutdown. '\n if exitmsg is not None:\n exitmsg = msg + exitmsg\n else:\n exitmsg = msg.strip()\n super(SaltAPI, self).shutdown(exitcode, exitmsg)\n"
] |
class SaltAPI(parsers.SaltAPIParser):
'''
The cli parser object used to fire up the salt api system.
'''
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(SaltAPI, self).start()
if check_user(self.config['user']):
log.info('The salt-api is starting up')
self.api.run()
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
'''
log.info('The salt-api is shutting down..')
msg = 'The salt-api is shutdown. '
if exitmsg is not None:
exitmsg = msg + exitmsg
else:
exitmsg = msg.strip()
super(SaltAPI, self).shutdown(exitcode, exitmsg)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
self.api.process_manager.stop_restarting()
self.api.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.api.process_manager.kill_children()
super(SaltAPI, self)._handle_signals(signum, sigframe)
|
saltstack/salt
|
salt/cli/api.py
|
SaltAPI.start
|
python
|
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(SaltAPI, self).start()
if check_user(self.config['user']):
log.info('The salt-api is starting up')
self.api.run()
|
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/api.py#L58-L71
|
[
"def check_user(user):\n '''\n Check user and assign process uid/gid.\n '''\n if salt.utils.platform.is_windows():\n return True\n if user == salt.utils.user.get_user():\n return True\n import pwd # after confirming not running Windows\n try:\n pwuser = pwd.getpwnam(user)\n try:\n if hasattr(os, 'initgroups'):\n os.initgroups(user, pwuser.pw_gid) # pylint: disable=minimum-python-version\n else:\n os.setgroups(salt.utils.user.get_gid_list(user, include_default=False))\n os.setgid(pwuser.pw_gid)\n os.setuid(pwuser.pw_uid)\n\n # We could just reset the whole environment but let's just override\n # the variables we can get from pwuser\n if 'HOME' in os.environ:\n os.environ['HOME'] = pwuser.pw_dir\n\n if 'SHELL' in os.environ:\n os.environ['SHELL'] = pwuser.pw_shell\n\n for envvar in ('USER', 'LOGNAME'):\n if envvar in os.environ:\n os.environ[envvar] = pwuser.pw_name\n\n except OSError:\n msg = 'Salt configured to run as user \"{0}\" but unable to switch.'\n msg = msg.format(user)\n if is_console_configured():\n log.critical(msg)\n else:\n sys.stderr.write(\"CRITICAL: {0}\\n\".format(msg))\n return False\n except KeyError:\n msg = 'User not found: \"{0}\"'.format(user)\n if is_console_configured():\n log.critical(msg)\n else:\n sys.stderr.write(\"CRITICAL: {0}\\n\".format(msg))\n return False\n return True\n"
] |
class SaltAPI(parsers.SaltAPIParser):
'''
The cli parser object used to fire up the salt api system.
'''
def prepare(self):
'''
Run the preparation sequence required to start a salt-api daemon.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(SaltAPI, self).prepare()
try:
if self.config['verify_env']:
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith(('tcp://',
'udp://',
'file://')):
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
self.setup_logfile_logger()
verify_log(self.config)
log.info('Setting up the Salt API')
self.api = salt.client.netapi.NetapiClient(self.config)
self.daemonize_if_required()
self.set_pidfile()
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
'''
log.info('The salt-api is shutting down..')
msg = 'The salt-api is shutdown. '
if exitmsg is not None:
exitmsg = msg + exitmsg
else:
exitmsg = msg.strip()
super(SaltAPI, self).shutdown(exitcode, exitmsg)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
self.api.process_manager.stop_restarting()
self.api.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.api.process_manager.kill_children()
super(SaltAPI, self)._handle_signals(signum, sigframe)
|
saltstack/salt
|
salt/cli/api.py
|
SaltAPI.shutdown
|
python
|
def shutdown(self, exitcode=0, exitmsg=None):
'''
If sub-classed, run any shutdown operations on this method.
'''
log.info('The salt-api is shutting down..')
msg = 'The salt-api is shutdown. '
if exitmsg is not None:
exitmsg = msg + exitmsg
else:
exitmsg = msg.strip()
super(SaltAPI, self).shutdown(exitcode, exitmsg)
|
If sub-classed, run any shutdown operations on this method.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/api.py#L73-L83
| null |
class SaltAPI(parsers.SaltAPIParser):
'''
The cli parser object used to fire up the salt api system.
'''
def prepare(self):
'''
Run the preparation sequence required to start a salt-api daemon.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).prepare()
'''
super(SaltAPI, self).prepare()
try:
if self.config['verify_env']:
logfile = self.config['log_file']
if logfile is not None and not logfile.startswith(('tcp://',
'udp://',
'file://')):
# Logfile is not using Syslog, verify
with salt.utils.files.set_umask(0o027):
verify_files([logfile], self.config['user'])
except OSError as err:
log.exception('Failed to prepare salt environment')
self.shutdown(err.errno)
self.setup_logfile_logger()
verify_log(self.config)
log.info('Setting up the Salt API')
self.api = salt.client.netapi.NetapiClient(self.config)
self.daemonize_if_required()
self.set_pidfile()
def start(self):
'''
Start the actual master.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(SaltAPI, self).start()
if check_user(self.config['user']):
log.info('The salt-api is starting up')
self.api.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate signal to the process manager processes
self.api.process_manager.stop_restarting()
self.api.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.api.process_manager.kill_children()
super(SaltAPI, self)._handle_signals(signum, sigframe)
|
saltstack/salt
|
salt/states/zookeeper.py
|
present
|
python
|
def present(name, value, acls=None, ephemeral=False, sequence=False, makepath=False, version=-1,
profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Make sure znode is present in the correct state with the correct acls
name
path to znode
value
value znode should be set to
acls
list of acl dictionaries to set on znode (make sure the ones salt is connected with are included)
Default: None
ephemeral
Boolean to indicate if ephemeral znode should be created
Default: False
sequence
Boolean to indicate if znode path is suffixed with a unique index
Default: False
makepath
Boolean to indicate if the parent paths should be created
Default: False
version
For updating, specify the version which should be updated
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
add znode:
zookeeper.present:
- name: /test/name
- value: gtmanfred
- makepath: True
update znode:
zookeeper.present:
- name: /test/name
- value: daniel
- acls:
- username: daniel
password: test
read: true
- username: gtmanfred
password: test
read: true
write: true
create: true
delete: true
admin: true
- makepath: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to setup znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if acls is None:
chk_acls = []
else:
chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]
if __salt__['zookeeper.exists'](name, **connkwargs):
cur_value = __salt__['zookeeper.get'](name, **connkwargs)
cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
if cur_value == value and (not chk_acls or _check_acls(cur_acls, chk_acls)):
ret['result'] = True
ret['comment'] = 'Znode {0} is already set to the correct value with the correct acls'.format(name)
return ret
elif __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} will be updated'.format(name)
ret['changes']['old'] = {}
ret['changes']['new'] = {}
if value != cur_value:
ret['changes']['old']['value'] = cur_value
ret['changes']['new']['value'] = value
if chk_acls and not _check_acls(chk_acls, cur_acls):
ret['changes']['old']['acls'] = cur_acls
ret['changes']['new']['acls'] = chk_acls
return ret
else:
value_result, acl_result = True, True
changes = {}
if value != cur_value:
__salt__['zookeeper.set'](name, value, version, **connkwargs)
new_value = __salt__['zookeeper.get'](name, **connkwargs)
value_result = new_value == value
changes.setdefault('new', {}).setdefault('value', new_value)
changes.setdefault('old', {}).setdefault('value', cur_value)
if chk_acls and not _check_acls(chk_acls, cur_acls):
__salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
acl_result = _check_acls(new_acls, chk_acls)
changes.setdefault('new', {}).setdefault('acls', new_acls)
changes.setdefault('old', {}).setdefault('value', cur_acls)
ret['changes'] = changes
if value_result and acl_result:
ret['result'] = True
ret['comment'] = 'Znode {0} successfully updated'.format(name)
return ret
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = '{0} is will be created'.format(name)
ret['changes']['old'] = {}
ret['changes']['new'] = {}
ret['changes']['new']['acls'] = chk_acls
ret['changes']['new']['value'] = value
return ret
__salt__['zookeeper.create'](name, value, acls, ephemeral, sequence, makepath, **connkwargs)
value_result, acl_result = True, True
changes = {'old': {}}
new_value = __salt__['zookeeper.get'](name, **connkwargs)
value_result = new_value == value
changes.setdefault('new', {}).setdefault('value', new_value)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
acl_result = acls is None or _check_acls(new_acls, chk_acls)
changes.setdefault('new', {}).setdefault('acls', new_acls)
ret['changes'] = changes
if value_result and acl_result:
ret['result'] = True
ret['comment'] = 'Znode {0} successfully created'.format(name)
return ret
|
Make sure znode is present in the correct state with the correct acls
name
path to znode
value
value znode should be set to
acls
list of acl dictionaries to set on znode (make sure the ones salt is connected with are included)
Default: None
ephemeral
Boolean to indicate if ephemeral znode should be created
Default: False
sequence
Boolean to indicate if znode path is suffixed with a unique index
Default: False
makepath
Boolean to indicate if the parent paths should be created
Default: False
version
For updating, specify the version which should be updated
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
add znode:
zookeeper.present:
- name: /test/name
- value: gtmanfred
- makepath: True
update znode:
zookeeper.present:
- name: /test/name
- value: daniel
- acls:
- username: daniel
password: test
read: true
- username: gtmanfred
password: test
read: true
write: true
create: true
delete: true
admin: true
- makepath: True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zookeeper.py#L47-L200
|
[
"def _check_acls(left, right):\n first = not bool(set(left) - set(right))\n second = not bool(set(right) - set(left))\n return first and second\n"
] |
# -*- coding: utf-8 -*-
'''
:depends: kazoo
:configuration: See :py:mod:`salt.modules.zookeeper` for setup instructions.
ACLS
~~~~
For more information about acls, please checkout the kazoo documentation.
http://kazoo.readthedocs.io/en/latest/api/security.html#kazoo.security.make_digest_acl
The following options can be included in the acl dictionary:
:param username: Username to use for the ACL.
:param password: A plain-text password to hash.
:param write: Write permission.
:type write: bool
:param create: Create permission.
:type create: bool
:param delete: Delete permission.
:type delete: bool
:param admin: Admin permission.
:type admin: bool
:param all: All permissions.
:type all: bool
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
__virtualname__ = 'zookeeper'
def __virtual__():
    '''
    Only load this state module when the zookeeper execution module
    (and therefore kazoo) is available.
    '''
    if 'zookeeper.create' not in __salt__:
        return False
    return __virtualname__
def _check_acls(left, right):
first = not bool(set(left) - set(right))
second = not bool(set(right) - set(left))
return first and second
def absent(name, version=-1, recursive=False, profile=None, hosts=None, scheme=None,
           username=None, password=None, default_acl=None):
    '''
    Make sure znode is absent

    name
        path to znode
    version
        Specify the version which should be deleted
        Default: -1 (always match)
    recursive
        Boolean to indicate if children should be recursively deleted
        Default: False
    profile
        Configured Zookeeper profile to authenticate with (Default: None)
    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
    scheme
        Scheme to authenticate with (Default: 'digest')
    username
        Username to authenticate (Default: None)
    password
        Password to authenticate (Default: None)
    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    .. code-block:: yaml

        delete znode:
          zookeeper.absent:
            - name: /test
            - recursive: True
    '''
    ret = {'name': name,
           'result': False,
           'comment': 'Failed to delete znode {0}'.format(name),
           'changes': {}}
    connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
                  'username': username, 'password': password,
                  'default_acl': default_acl}

    # Nothing to do if the znode is already gone.
    if __salt__['zookeeper.exists'](name, **connkwargs) is False:
        ret['result'] = True
        ret['comment'] = 'Znode {0} does not exist'.format(name)
        return ret

    # Capture what is about to be removed so it can be reported in changes.
    changes = {}
    changes['value'] = __salt__['zookeeper.get'](name, **connkwargs)
    changes['acls'] = __salt__['zookeeper.get_acls'](name, **connkwargs)
    if recursive is True:
        changes['children'] = __salt__['zookeeper.get_children'](name, **connkwargs)

    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'Znode {0} will be removed'.format(name)
        ret['changes']['old'] = changes
        return ret

    __salt__['zookeeper.delete'](name, version, recursive, **connkwargs)
    if __salt__['zookeeper.exists'](name, **connkwargs) is False:
        ret['result'] = True
        ret['comment'] = 'Znode {0} has been removed'.format(name)
        ret['changes']['old'] = changes
    # BUG FIX: always return the state dict. Previously a delete that left
    # the znode in place fell off the end of the function and returned None,
    # which the state compiler cannot interpret.
    return ret
def acls(name, acls, version=-1, profile=None, hosts=None, scheme=None,
         username=None, password=None, default_acl=None):
    '''
    Update acls on a znode

    name
        path to znode
    acls
        list of acl dictionaries to set on znode
    version
        Specify the version which should be deleted
        Default: -1 (always match)
    profile
        Configured Zookeeper profile to authenticate with (Default: None)
    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
    scheme
        Scheme to authenticate with (Default: 'digest')
    username
        Username to authenticate (Default: None)
    password
        Password to authenticate (Default: None)
    default_acl
        Default acls to assign if a node is created in this connection (Default: None)
    '''
    ret = {'name': name,
           'result': False,
           'comment': 'Failed to set acls on znode {0}'.format(name),
           'changes': {}}
    connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
                  'username': username, 'password': password,
                  'default_acl': default_acl}

    # Accept a single acl dict as shorthand for a one-element list.
    if isinstance(acls, dict):
        acls = [acls]
    # Convert the requested acl dicts to digest ACL objects for comparison.
    wanted_acls = [__salt__['zookeeper.make_digest_acl'](**entry) for entry in acls]

    if not __salt__['zookeeper.exists'](name, **connkwargs):
        ret['comment'] += ': Znode does not exist'
        return ret

    current_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
    if _check_acls(current_acls, wanted_acls):
        ret['result'] = True
        ret['comment'] = 'Znode {0} acls already set'.format(name)
        return ret

    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'Znode {0} acls will be updated'.format(name)
        ret['changes'] = {'old': current_acls, 'new': wanted_acls}
        return ret

    __salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
    applied_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
    ret['changes'] = {'old': current_acls, 'new': applied_acls}
    if _check_acls(applied_acls, wanted_acls):
        ret['result'] = True
        ret['comment'] = 'Znode {0} acls updated'.format(name)
        return ret
    ret['comment'] = 'Znode {0} acls failed to update'.format(name)
    return ret
|
saltstack/salt
|
salt/states/zookeeper.py
|
absent
|
python
|
def absent(name, version=-1, recursive=False, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Make sure znode is absent
name
path to znode
version
Specify the version which should be deleted
Default: -1 (always match)
recursive
Boolean to indicate if children should be recursively deleted
Default: False
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to delete znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if __salt__['zookeeper.exists'](name, **connkwargs) is False:
ret['result'] = True
ret['comment'] = 'Znode {0} does not exist'.format(name)
return ret
changes = {}
changes['value'] = __salt__['zookeeper.get'](name, **connkwargs)
changes['acls'] = __salt__['zookeeper.get_acls'](name, **connkwargs)
if recursive is True:
changes['children'] = __salt__['zookeeper.get_children'](name, **connkwargs)
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} will be removed'.format(name)
ret['changes']['old'] = changes
return ret
__salt__['zookeeper.delete'](name, version, recursive, **connkwargs)
if __salt__['zookeeper.exists'](name, **connkwargs) is False:
ret['result'] = True
ret['comment'] = 'Znode {0} has been removed'.format(name)
ret['changes']['old'] = changes
return ret
|
Make sure znode is absent
name
path to znode
version
Specify the version which should be deleted
Default: -1 (always match)
recursive
Boolean to indicate if children should be recursively deleted
Default: False
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zookeeper.py#L203-L276
| null |
# -*- coding: utf-8 -*-
'''
:depends: kazoo
:configuration: See :py:mod:`salt.modules.zookeeper` for setup instructions.
ACLS
~~~~
For more information about acls, please checkout the kazoo documentation.
http://kazoo.readthedocs.io/en/latest/api/security.html#kazoo.security.make_digest_acl
The following options can be included in the acl dictionary:
:param username: Username to use for the ACL.
:param password: A plain-text password to hash.
:param write: Write permission.
:type write: bool
:param create: Create permission.
:type create: bool
:param delete: Delete permission.
:type delete: bool
:param admin: Admin permission.
:type admin: bool
:param all: All permissions.
:type all: bool
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
__virtualname__ = 'zookeeper'
def __virtual__():
if 'zookeeper.create' in __salt__:
return __virtualname__
return False
def _check_acls(left, right):
first = not bool(set(left) - set(right))
second = not bool(set(right) - set(left))
return first and second
def present(name, value, acls=None, ephemeral=False, sequence=False, makepath=False, version=-1,
            profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
    '''
    Make sure znode is present in the correct state with the correct acls

    name
        path to znode
    value
        value znode should be set to
    acls
        list of acl dictionaries to set on znode (make sure the ones salt is
        connected with are included)
        Default: None
    ephemeral
        Boolean to indicate if ephemeral znode should be created
        Default: False
    sequence
        Boolean to indicate if znode path is suffixed with a unique index
        Default: False
    makepath
        Boolean to indicate if the parent paths should be created
        Default: False
    version
        For updating, specify the version which should be updated
        Default: -1 (always match)
    profile
        Configured Zookeeper profile to authenticate with (Default: None)
    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
    scheme
        Scheme to authenticate with (Default: 'digest')
    username
        Username to authenticate (Default: None)
    password
        Password to authenticate (Default: None)
    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    .. code-block:: yaml

        add znode:
          zookeeper.present:
            - name: /test/name
            - value: gtmanfred
            - makepath: True
    '''
    ret = {'name': name,
           'result': False,
           'comment': 'Failed to setup znode {0}'.format(name),
           'changes': {}}
    connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
                  'username': username, 'password': password,
                  'default_acl': default_acl}

    # Convert requested acl dicts to digest ACL objects for comparison.
    if acls is None:
        chk_acls = []
    else:
        chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]

    if __salt__['zookeeper.exists'](name, **connkwargs):
        # Znode already exists: compare value and acls, update if needed.
        cur_value = __salt__['zookeeper.get'](name, **connkwargs)
        cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
        if cur_value == value and (not chk_acls or _check_acls(cur_acls, chk_acls)):
            ret['result'] = True
            ret['comment'] = 'Znode {0} is already set to the correct value with the correct acls'.format(name)
            return ret
        elif __opts__['test'] is True:
            ret['result'] = None
            ret['comment'] = 'Znode {0} will be updated'.format(name)
            ret['changes']['old'] = {}
            ret['changes']['new'] = {}
            if value != cur_value:
                ret['changes']['old']['value'] = cur_value
                ret['changes']['new']['value'] = value
            if chk_acls and not _check_acls(chk_acls, cur_acls):
                ret['changes']['old']['acls'] = cur_acls
                ret['changes']['new']['acls'] = chk_acls
            return ret
        else:
            value_result, acl_result = True, True
            changes = {}
            if value != cur_value:
                __salt__['zookeeper.set'](name, value, version, **connkwargs)
                new_value = __salt__['zookeeper.get'](name, **connkwargs)
                value_result = new_value == value
                changes.setdefault('new', {}).setdefault('value', new_value)
                changes.setdefault('old', {}).setdefault('value', cur_value)
            if chk_acls and not _check_acls(chk_acls, cur_acls):
                __salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
                new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
                acl_result = _check_acls(new_acls, chk_acls)
                changes.setdefault('new', {}).setdefault('acls', new_acls)
                # BUG FIX: the previous acls were recorded under the 'value'
                # key ("setdefault('value', cur_acls)"), which silently
                # dropped them whenever the value had also changed.
                changes.setdefault('old', {}).setdefault('acls', cur_acls)
            ret['changes'] = changes
            if value_result and acl_result:
                ret['result'] = True
                ret['comment'] = 'Znode {0} successfully updated'.format(name)
            return ret

    # Znode does not exist yet: create it.
    if __opts__['test'] is True:
        ret['result'] = None
        # BUG FIX: message previously read '{0} is will be created'.
        ret['comment'] = '{0} will be created'.format(name)
        ret['changes']['old'] = {}
        ret['changes']['new'] = {}
        ret['changes']['new']['acls'] = chk_acls
        ret['changes']['new']['value'] = value
        return ret

    __salt__['zookeeper.create'](name, value, acls, ephemeral, sequence, makepath, **connkwargs)
    changes = {'old': {}}
    new_value = __salt__['zookeeper.get'](name, **connkwargs)
    value_result = new_value == value
    changes.setdefault('new', {}).setdefault('value', new_value)
    new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
    # When no acls were requested, whatever the server assigned is acceptable.
    acl_result = acls is None or _check_acls(new_acls, chk_acls)
    changes.setdefault('new', {}).setdefault('acls', new_acls)
    ret['changes'] = changes
    if value_result and acl_result:
        ret['result'] = True
        ret['comment'] = 'Znode {0} successfully created'.format(name)
    return ret
def acls(name, acls, version=-1, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Update acls on a znode
name
path to znode
acls
list of acl dictionaries to set on znode
version
Specify the version which should be deleted
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
update acls:
zookeeper.acls:
- name: /test/name
- acls:
- username: daniel
password: test
all: True
- username: gtmanfred
password: test
all: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to set acls on znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if isinstance(acls, dict):
acls = [acls]
chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]
if not __salt__['zookeeper.exists'](name, **connkwargs):
ret['comment'] += ': Znode does not exist'
return ret
cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
if _check_acls(cur_acls, chk_acls):
ret['result'] = True
ret['comment'] = 'Znode {0} acls already set'.format(name)
return ret
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} acls will be updated'.format(name)
ret['changes']['old'] = cur_acls
ret['changes']['new'] = chk_acls
return ret
__salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
ret['changes'] = {'old': cur_acls, 'new': new_acls}
if _check_acls(new_acls, chk_acls):
ret['result'] = True
ret['comment'] = 'Znode {0} acls updated'.format(name)
return ret
ret['comment'] = 'Znode {0} acls failed to update'.format(name)
return ret
|
saltstack/salt
|
salt/states/zookeeper.py
|
acls
|
python
|
def acls(name, acls, version=-1, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Update acls on a znode
name
path to znode
acls
list of acl dictionaries to set on znode
version
Specify the version which should be deleted
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
update acls:
zookeeper.acls:
- name: /test/name
- acls:
- username: daniel
password: test
all: True
- username: gtmanfred
password: test
all: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to set acls on znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if isinstance(acls, dict):
acls = [acls]
chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]
if not __salt__['zookeeper.exists'](name, **connkwargs):
ret['comment'] += ': Znode does not exist'
return ret
cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
if _check_acls(cur_acls, chk_acls):
ret['result'] = True
ret['comment'] = 'Znode {0} acls already set'.format(name)
return ret
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} acls will be updated'.format(name)
ret['changes']['old'] = cur_acls
ret['changes']['new'] = chk_acls
return ret
__salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
ret['changes'] = {'old': cur_acls, 'new': new_acls}
if _check_acls(new_acls, chk_acls):
ret['result'] = True
ret['comment'] = 'Znode {0} acls updated'.format(name)
return ret
ret['comment'] = 'Znode {0} acls failed to update'.format(name)
return ret
|
Update acls on a znode
name
path to znode
acls
list of acl dictionaries to set on znode
version
Specify the version which should be deleted
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
update acls:
zookeeper.acls:
- name: /test/name
- acls:
- username: daniel
password: test
all: True
- username: gtmanfred
password: test
all: True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zookeeper.py#L279-L362
|
[
"def _check_acls(left, right):\n first = not bool(set(left) - set(right))\n second = not bool(set(right) - set(left))\n return first and second\n"
] |
# -*- coding: utf-8 -*-
'''
:depends: kazoo
:configuration: See :py:mod:`salt.modules.zookeeper` for setup instructions.
ACLS
~~~~
For more information about acls, please checkout the kazoo documentation.
http://kazoo.readthedocs.io/en/latest/api/security.html#kazoo.security.make_digest_acl
The following options can be included in the acl dictionary:
:param username: Username to use for the ACL.
:param password: A plain-text password to hash.
:param write: Write permission.
:type write: bool
:param create: Create permission.
:type create: bool
:param delete: Delete permission.
:type delete: bool
:param admin: Admin permission.
:type admin: bool
:param all: All permissions.
:type all: bool
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
__virtualname__ = 'zookeeper'
def __virtual__():
if 'zookeeper.create' in __salt__:
return __virtualname__
return False
def _check_acls(left, right):
first = not bool(set(left) - set(right))
second = not bool(set(right) - set(left))
return first and second
def present(name, value, acls=None, ephemeral=False, sequence=False, makepath=False, version=-1,
profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Make sure znode is present in the correct state with the correct acls
name
path to znode
value
value znode should be set to
acls
list of acl dictionaries to set on znode (make sure the ones salt is connected with are included)
Default: None
ephemeral
Boolean to indicate if ephemeral znode should be created
Default: False
sequence
Boolean to indicate if znode path is suffixed with a unique index
Default: False
makepath
Boolean to indicate if the parent paths should be created
Default: False
version
For updating, specify the version which should be updated
Default: -1 (always match)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
add znode:
zookeeper.present:
- name: /test/name
- value: gtmanfred
- makepath: True
update znode:
zookeeper.present:
- name: /test/name
- value: daniel
- acls:
- username: daniel
password: test
read: true
- username: gtmanfred
password: test
read: true
write: true
create: true
delete: true
admin: true
- makepath: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to setup znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if acls is None:
chk_acls = []
else:
chk_acls = [__salt__['zookeeper.make_digest_acl'](**acl) for acl in acls]
if __salt__['zookeeper.exists'](name, **connkwargs):
cur_value = __salt__['zookeeper.get'](name, **connkwargs)
cur_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
if cur_value == value and (not chk_acls or _check_acls(cur_acls, chk_acls)):
ret['result'] = True
ret['comment'] = 'Znode {0} is already set to the correct value with the correct acls'.format(name)
return ret
elif __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} will be updated'.format(name)
ret['changes']['old'] = {}
ret['changes']['new'] = {}
if value != cur_value:
ret['changes']['old']['value'] = cur_value
ret['changes']['new']['value'] = value
if chk_acls and not _check_acls(chk_acls, cur_acls):
ret['changes']['old']['acls'] = cur_acls
ret['changes']['new']['acls'] = chk_acls
return ret
else:
value_result, acl_result = True, True
changes = {}
if value != cur_value:
__salt__['zookeeper.set'](name, value, version, **connkwargs)
new_value = __salt__['zookeeper.get'](name, **connkwargs)
value_result = new_value == value
changes.setdefault('new', {}).setdefault('value', new_value)
changes.setdefault('old', {}).setdefault('value', cur_value)
if chk_acls and not _check_acls(chk_acls, cur_acls):
__salt__['zookeeper.set_acls'](name, acls, version, **connkwargs)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
acl_result = _check_acls(new_acls, chk_acls)
changes.setdefault('new', {}).setdefault('acls', new_acls)
changes.setdefault('old', {}).setdefault('value', cur_acls)
ret['changes'] = changes
if value_result and acl_result:
ret['result'] = True
ret['comment'] = 'Znode {0} successfully updated'.format(name)
return ret
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = '{0} is will be created'.format(name)
ret['changes']['old'] = {}
ret['changes']['new'] = {}
ret['changes']['new']['acls'] = chk_acls
ret['changes']['new']['value'] = value
return ret
__salt__['zookeeper.create'](name, value, acls, ephemeral, sequence, makepath, **connkwargs)
value_result, acl_result = True, True
changes = {'old': {}}
new_value = __salt__['zookeeper.get'](name, **connkwargs)
value_result = new_value == value
changes.setdefault('new', {}).setdefault('value', new_value)
new_acls = __salt__['zookeeper.get_acls'](name, **connkwargs)
acl_result = acls is None or _check_acls(new_acls, chk_acls)
changes.setdefault('new', {}).setdefault('acls', new_acls)
ret['changes'] = changes
if value_result and acl_result:
ret['result'] = True
ret['comment'] = 'Znode {0} successfully created'.format(name)
return ret
def absent(name, version=-1, recursive=False, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Make sure znode is absent
name
path to znode
version
Specify the version which should be deleted
Default: -1 (always match)
recursive
Boolean to indicate if children should be recursively deleted
Default: False
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
.. code-block:: yaml
delete znode:
zookeeper.absent:
- name: /test
- recursive: True
'''
ret = {'name': name,
'result': False,
'comment': 'Failed to delete znode {0}'.format(name),
'changes': {}}
connkwargs = {'profile': profile, 'hosts': hosts, 'scheme': scheme,
'username': username, 'password': password,
'default_acl': default_acl}
if __salt__['zookeeper.exists'](name, **connkwargs) is False:
ret['result'] = True
ret['comment'] = 'Znode {0} does not exist'.format(name)
return ret
changes = {}
changes['value'] = __salt__['zookeeper.get'](name, **connkwargs)
changes['acls'] = __salt__['zookeeper.get_acls'](name, **connkwargs)
if recursive is True:
changes['children'] = __salt__['zookeeper.get_children'](name, **connkwargs)
if __opts__['test'] is True:
ret['result'] = None
ret['comment'] = 'Znode {0} will be removed'.format(name)
ret['changes']['old'] = changes
return ret
__salt__['zookeeper.delete'](name, version, recursive, **connkwargs)
if __salt__['zookeeper.exists'](name, **connkwargs) is False:
ret['result'] = True
ret['comment'] = 'Znode {0} has been removed'.format(name)
ret['changes']['old'] = changes
return ret
|
saltstack/salt
|
salt/modules/timezone.py
|
_timedatectl
|
python
|
def _timedatectl():
    '''
    get the output of timedatectl

    Returns the full ``cmd.run_all`` result dict; raises
    CommandExecutionError when the command exits non-zero.
    '''
    result = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if result['retcode'] == 0:
        return result
    raise CommandExecutionError(
        'timedatectl failed: {0}'.format(result['stderr'])
    )
|
get the output of timedatectl
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L46-L56
| null |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # Windows and macOS have dedicated timezone modules (win_timezone.py,
    # mac_timezone.py) that replace this one, so refuse to load there.
    if salt.utils.platform.is_windows():
        return (False, 'The timezone execution module failed to load: '
                'win_timezone.py should replace this module on Windows.'
                'There was a problem loading win_timezone.py.')
    if salt.utils.platform.is_darwin():
        return (False, 'The timezone execution module failed to load: '
                'mac_timezone.py should replace this module on macOS.'
                'There was a problem loading mac_timezone.py.')
    return __virtualname__
def _get_zone_solaris():
    '''
    Read the timezone name from the ``TZ=`` entry in /etc/TIMEZONE (Solaris).

    Raises CommandExecutionError when no TZ= line is found.
    '''
    tzfile = '/etc/TIMEZONE'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for raw_line in fp_:
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            if 'TZ=' not in raw_line:
                continue
            # Take everything after the last '=', drop quotes; empty => UTC.
            zone = raw_line.rstrip('\n').split('=')[-1].strip('\'"')
            return zone or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_adjtime_timezone():
    '''
    Return the timezone in /etc/adjtime of the system clock
    '''
    adjtime_file = '/etc/adjtime'
    if not os.path.exists(adjtime_file):
        if os.path.exists('/dev/rtc'):
            # An RTC is present but /etc/adjtime is missing -- cannot tell
            # whether the hardware clock is UTC or localtime.
            raise CommandExecutionError(
                'Unable to get hwclock timezone from ' + adjtime_file
            )
        # There is no RTC.
        return None
    # The last line of /etc/adjtime is either 'UTC' or 'LOCAL'.
    return __salt__['cmd.run'](['tail', '-n', '1', adjtime_file],
                               python_shell=False)
def _get_zone_sysconfig():
    '''
    Read the timezone from the ``ZONE=`` entry in /etc/sysconfig/clock
    (RedHat/Suse families).
    '''
    tzfile = '/etc/sysconfig/clock'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for raw_line in fp_:
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            # Skip comment lines.
            if re.match(r'^\s*#', raw_line):
                continue
            if 'ZONE' in raw_line and '=' in raw_line:
                zone = raw_line.rstrip('\n').split('=')[-1].strip('\'"')
                return zone or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
    # Derive the Olson timezone name from /etc/localtime: if it is a symlink
    # into the zoneinfo tree, take the name from the link target; otherwise
    # fall back to hashing every zoneinfo file to find a content match.
    tzfile = _get_localtime_path()
    tzdir = '/usr/share/zoneinfo/'
    tzdir_len = len(tzdir)
    try:
        # Normalize the link target relative to /etc (targets may be relative).
        olson_name = os.path.normpath(
            os.path.join('/etc', os.readlink(tzfile))
        )
        if olson_name.startswith(tzdir):
            return olson_name[tzdir_len:]
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # tzfile does not exist; FreeBSD can still report a zone code.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            raise CommandExecutionError(tzfile + ' does not exist')
        elif exc.errno == errno.EINVAL:
            # EINVAL from readlink: tzfile is a regular file, not a symlink.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            log.warning(
                '%s is not a symbolic link, attempting to match it '
                'to zoneinfo files', tzfile
            )
            # Regular file. Try to match the hash.
            hash_type = __opts__.get('hash_type', 'md5')
            tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
            # Not a link, just a copy of the tzdata file
            for root, dirs, files in salt.utils.path.os_walk(tzdir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    olson_name = full_path[tzdir_len:]
                    # Olson names start with an uppercase letter; skip
                    # lowercase entries (posixrules, localtime, etc.).
                    if olson_name[0] in string.ascii_lowercase:
                        continue
                    if tzfile_hash == \
                            salt.utils.hashutils.get_hash(full_path, hash_type):
                        return olson_name
    raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
    '''
    Return the stripped contents of /etc/timezone (Debian/Gentoo families).

    Raises CommandExecutionError when the file cannot be read.
    '''
    tzfile = '/etc/timezone'
    try:
        with salt.utils.files.fopen(tzfile, 'r') as handle:
            contents = handle.read()
            return salt.utils.stringutils.to_unicode(contents).strip()
    except IOError as exc:
        raise CommandExecutionError(
            'Problem reading timezone file {0}: {1}'
            .format(tzfile, exc.strerror)
        )
def _get_zone_aix():
    '''
    Read the timezone from the ``TZ=`` entry in /etc/environment (AIX).

    Raises CommandExecutionError when no TZ= line is found.
    '''
    tzfile = '/etc/environment'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for raw_line in fp_:
            raw_line = salt.utils.stringutils.to_unicode(raw_line)
            if 'TZ=' not in raw_line:
                continue
            zone = raw_line.rstrip('\n').split('=')[-1].strip('\'"')
            return zone or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    .. versionchanged:: 2016.11.4

    .. note::
        On AIX operating systems, Posix values can also be returned
        'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    # Prefer timedatectl (systemd) when available; otherwise dispatch on the
    # OS family to the appropriate config-file reader.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
            try:
                # Matches both 'Time zone:' and 'Timezone:' output variants.
                return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
            except AttributeError:
                # Line did not match; keep scanning.
                pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        # CentOS keeps the authoritative zone in /etc/localtime even though
        # it is RedHat-family, so special-case it before the family checks.
        if __grains__['os'].lower() == 'centos':
            return _get_zone_etc_localtime()
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse'):
            if family in os_family:
                return _get_zone_sysconfig()
        for family in ('Debian', 'Gentoo'):
            if family in os_family:
                return _get_zone_etc_timezone()
        if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
            return _get_zone_etc_localtime()
        elif 'Solaris' in os_family:
            return _get_zone_solaris()
        elif 'AIX' in os_family:
            return _get_zone_aix()
    raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    # `date +%Z` prints the abbreviated timezone name.
    cmd = ['date', '+%Z']
    return __salt__['cmd.run'](cmd, python_shell=False)
def get_offset():
    '''
    Get current numeric timezone offset from UCT (i.e. -0700)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    offset_cmd = ['date', '+%z']
    if 'AIX' not in __grains__['os_family']:
        return __salt__['cmd.run'](offset_cmd, python_shell=False)
    # AIX's native date does not support %z; fall back to the date binary
    # shipped with salt.
    salt_path = '/opt/salt/bin/date'
    if not os.path.exists(salt_path):
        return 'date in salt binaries does not exist: {0}'.format(salt_path)
    return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
    '''
    Unlinks, then symlinks /etc/localtime to the set timezone.

    The timezone is crucial to several system processes, each of which SHOULD
    be restarted (for instance, whatever you system uses as its cron and
    syslog daemons). This will not be automagically done and must be done
    manually!

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'

    .. versionchanged:: 2016.11.4

    .. note::
        On AIX operating systems, Posix values are also allowed, see below

    .. code-block:: bash

        salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
    '''
    # On systemd hosts let timedatectl do the work; failures fall through to
    # the manual symlink path below (best-effort).
    if salt.utils.path.which('timedatectl'):
        try:
            __salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
        except CommandExecutionError:
            pass

    # Solaris/AIX keep the zoneinfo database under /usr/share/lib.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
    else:
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)

    # AIX is exempt from this check because Posix TZ strings (not files) are
    # valid input there.
    if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
        return 'Zone does not exist: {0}'.format(zonepath)

    tzfile = _get_localtime_path()
    if os.path.exists(tzfile):
        os.unlink(tzfile)

    if 'Solaris' in __grains__['os_family']:
        # Solaris stores the zone name in /etc/default/init, not a symlink.
        __salt__['file.sed'](
            '/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
    elif 'AIX' in __grains__['os_family']:
        # timezone could be Olson or Posix
        curtzstring = get_zone()
        cmd = ['chtz', timezone]
        result = __salt__['cmd.retcode'](cmd, python_shell=False)
        if result == 0:
            return True

        # restore orig timezone, since AIX chtz failure sets UTC
        cmd = ['chtz', curtzstring]
        __salt__['cmd.retcode'](cmd, python_shell=False)
        return False
    else:
        os.symlink(zonepath, tzfile)

    # Keep the distribution-specific config files in sync with the symlink.
    if 'RedHat' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
    elif 'Suse' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
    elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
        with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
            ofh.write(salt.utils.stringutils.to_str(timezone).strip())
            ofh.write('\n')

    return True
def zone_compare(timezone):
    '''
    Compares the given timezone name with the system timezone name.
    Checks the hash sum between the given timezone, and the one set in
    /etc/localtime. Returns True if names and hash sums match, and False if not.
    Mostly useful for running state checks.

    .. versionchanged:: 2016.3.0

    .. note::

        On Solaris-link operating systems only a string comparison is done.

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems only a string comparison is done.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    # Solaris/AIX do not symlink /etc/localtime, so only the names can be
    # compared.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        return timezone == get_zone()

    # FreeBSD may have no /etc/localtime at all; fall back to name comparison.
    if 'FreeBSD' in __grains__['os_family']:
        if not os.path.isfile(_get_localtime_path()):
            return timezone == get_zone()

    tzfile = _get_localtime_path()
    zonepath = _get_zone_file(timezone)
    try:
        # Byte-for-byte file comparison (shallow=False forces content check).
        return filecmp.cmp(tzfile, zonepath, shallow=False)
    except OSError as exc:
        # Work out which of the two files failed to open and raise the
        # matching, more descriptive exception.
        problematic_file = exc.filename
        if problematic_file == zonepath:
            raise SaltInvocationError(
                'Can\'t find a local timezone "{0}"'.format(timezone))
        elif problematic_file == tzfile:
            raise CommandExecutionError(
                'Failed to read {0} to determine current timezone: {1}'
                .format(tzfile, exc.strerror))
        raise
def _get_localtime_path():
    '''
    Return the platform-specific path of the localtime file.
    '''
    # NI Linux RT (nilrt) relocates the file under /etc/natinst.
    family = __grains__['os_family']
    if 'NILinuxRT' in family and 'nilrt' in __grains__['lsb_distrib_id']:
        return '/etc/natinst/share/localtime'
    return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    # systemd hosts: parse the "RTC in local TZ" line of timedatectl output.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        os_family = __grains__['os_family']
        # These families record the setting on the last line of /etc/adjtime.
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # Original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError as exc:
                pass
            # Since Wheezy
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            # Pre-adjtime Gentoo keeps a ``clock=`` line in conf.d/hwclock.
            if not os.path.exists('/etc/adjtime'):
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return line
                                if line == 'local':
                                    return 'LOCAL'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file)
                        )
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror)
                    )
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
        if 'AIX' in __grains__['os_family']:
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    # systemd hosts: delegate directly to timedatectl.
    if salt.utils.path.which('timedatectl'):
        cmd = ['timedatectl', 'set-local-rtc',
               'true' if clock == 'localtime' else 'false']
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    else:
        os_family = __grains__['os_family']
        if os_family in ('AIX', 'NILinuxRT'):
            # These platforms only support a UTC hardware clock.
            if clock.lower() != 'utc':
                raise SaltInvocationError(
                    'UTC is the only permitted value'
                )
            return True

        timezone = get_zone()

        if 'Solaris' in __grains__['os_family']:
            if clock.lower() not in ('localtime', 'utc'):
                raise SaltInvocationError(
                    'localtime and UTC are the only permitted values'
                )
            if 'sparc' in __grains__['cpuarch']:
                raise SaltInvocationError(
                    'UTC is the only choice for SPARC architecture'
                )
            cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0

        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                'Zone \'{0}\' does not exist'.format(zonepath)
            )

        # Re-point /etc/localtime at the zoneinfo file of the current zone.
        os.unlink('/etc/localtime')
        os.symlink(zonepath, '/etc/localtime')

        if 'Arch' in __grains__['os_family']:
            # Bugfix: the systemd tool is ``timedatectl``; the previously used
            # ``timezonectl`` binary does not exist (compare the branch at the
            # top of this function, which already spells it correctly).
            cmd = ['timedatectl', 'set-local-rtc',
                   'true' if clock == 'localtime' else 'false']
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        elif 'RedHat' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
        elif 'Suse' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
        elif 'Debian' in __grains__['os_family']:
            if clock == 'UTC':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
            elif clock == 'localtime':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
        elif 'Gentoo' in __grains__['os_family']:
            if clock not in ('UTC', 'localtime'):
                raise SaltInvocationError(
                    'Only \'UTC\' and \'localtime\' are allowed'
                )
            if clock == 'localtime':
                clock = 'local'
            __salt__['file.sed'](
                '/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))

    return True
|
saltstack/salt
|
salt/modules/timezone.py
|
_get_adjtime_timezone
|
python
|
def _get_adjtime_timezone():
'''
Return the timezone in /etc/adjtime of the system clock
'''
adjtime_file = '/etc/adjtime'
if os.path.exists(adjtime_file):
cmd = ['tail', '-n', '1', adjtime_file]
return __salt__['cmd.run'](cmd, python_shell=False)
elif os.path.exists('/dev/rtc'):
raise CommandExecutionError(
'Unable to get hwclock timezone from ' + adjtime_file
)
else:
# There is no RTC.
return None
|
Return the timezone in /etc/adjtime of the system clock
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L70-L84
| null |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # Windows and macOS each have a dedicated replacement module; returning a
    # (False, reason) tuple tells the loader why this module was skipped.
    if salt.utils.platform.is_windows():
        return (False, 'The timezone execution module failed to load: '
                'win_timezone.py should replace this module on Windows.'
                'There was a problem loading win_timezone.py.')

    if salt.utils.platform.is_darwin():
        return (False, 'The timezone execution module failed to load: '
                'mac_timezone.py should replace this module on macOS.'
                'There was a problem loading mac_timezone.py.')

    return __virtualname__
def _timedatectl():
    '''
    Run ``timedatectl`` and return the full ``cmd.run_all`` result dict.

    Raises CommandExecutionError when the command exits non-zero.
    '''
    result = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if result['retcode'] == 0:
        return result
    raise CommandExecutionError(
        'timedatectl failed: {0}'.format(result['stderr'])
    )
def _get_zone_solaris():
    # Parse /etc/TIMEZONE (Solaris) for the ``TZ=`` assignment.
    tzfile = '/etc/TIMEZONE'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if 'TZ=' in line:
                # Value after '=', with surrounding quotes removed; an empty
                # value defaults to UTC.
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_sysconfig():
    # Parse /etc/sysconfig/clock (RedHat/Suse) for the ZONE/TIMEZONE setting.
    tzfile = '/etc/sysconfig/clock'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            # Skip comment lines.
            if re.match(r'^\s*#', line):
                continue
            if 'ZONE' in line and '=' in line:
                # Value after '=', unquoted; an empty value defaults to UTC.
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
    # Resolve the Olson zone name from the /etc/localtime symlink; if it is a
    # regular file instead, fall back to hash-matching it against the
    # zoneinfo database.
    tzfile = _get_localtime_path()
    tzdir = '/usr/share/zoneinfo/'
    tzdir_len = len(tzdir)
    try:
        olson_name = os.path.normpath(
            os.path.join('/etc', os.readlink(tzfile))
        )
        if olson_name.startswith(tzdir):
            # Strip the zoneinfo prefix to yield e.g. 'America/Denver'.
            return olson_name[tzdir_len:]
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # File missing: FreeBSD can still report via the zone code.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            raise CommandExecutionError(tzfile + ' does not exist')
        elif exc.errno == errno.EINVAL:
            # EINVAL from readlink(): tzfile is a regular file, not a symlink.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            log.warning(
                '%s is not a symbolic link, attempting to match it '
                'to zoneinfo files', tzfile
            )
            # Regular file. Try to match the hash.
            hash_type = __opts__.get('hash_type', 'md5')
            tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
            # Not a link, just a copy of the tzdata file
            for root, dirs, files in salt.utils.path.os_walk(tzdir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    olson_name = full_path[tzdir_len:]
                    # Lowercase-first names (posix/, right/ variants etc.) are
                    # not canonical Olson names; skip them.
                    if olson_name[0] in string.ascii_lowercase:
                        continue
                    if tzfile_hash == \
                            salt.utils.hashutils.get_hash(full_path, hash_type):
                        return olson_name
    raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
    '''
    Read the timezone name stored in /etc/timezone (Debian/Gentoo).
    '''
    tzfile = '/etc/timezone'
    try:
        with salt.utils.files.fopen(tzfile, 'r') as fp_:
            contents = fp_.read()
    except IOError as exc:
        raise CommandExecutionError(
            'Problem reading timezone file {0}: {1}'
            .format(tzfile, exc.strerror)
        )
    return salt.utils.stringutils.to_unicode(contents).strip()
def _get_zone_aix():
    # Parse /etc/environment (AIX) for the ``TZ=`` assignment.
    tzfile = '/etc/environment'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if 'TZ=' in line:
                # Value after '=', unquoted; an empty value defaults to UTC.
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
'''
Get current timezone (i.e. America/Denver)
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values can also be returned
'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
try:
return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
except AttributeError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
if __grains__['os'].lower() == 'centos':
return _get_zone_etc_localtime()
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
return _get_zone_sysconfig()
for family in ('Debian', 'Gentoo'):
if family in os_family:
return _get_zone_etc_timezone()
if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
return _get_zone_etc_localtime()
elif 'Solaris' in os_family:
return _get_zone_solaris()
elif 'AIX' in os_family:
return _get_zone_aix()
raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def get_offset():
'''
Get current numeric timezone offset from UCT (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
if 'AIX' not in __grains__['os_family']:
return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
salt_path = '/opt/salt/bin/date'
if not os.path.exists(salt_path):
return 'date in salt binaries does not exist: {0}'.format(salt_path)
return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever you system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values are also allowed, see below
.. code-block:: bash
salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
'''
if salt.utils.path.which('timedatectl'):
try:
__salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
except CommandExecutionError:
pass
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
else:
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
return 'Zone does not exist: {0}'.format(zonepath)
tzfile = _get_localtime_path()
if os.path.exists(tzfile):
os.unlink(tzfile)
if 'Solaris' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
elif 'AIX' in __grains__['os_family']:
# timezone could be Olson or Posix
curtzstring = get_zone()
cmd = ['chtz', timezone]
result = __salt__['cmd.retcode'](cmd, python_shell=False)
if result == 0:
return True
# restore orig timezone, since AIX chtz failure sets UTC
cmd = ['chtz', curtzstring]
__salt__['cmd.retcode'](cmd, python_shell=False)
return False
else:
os.symlink(zonepath, tzfile)
if 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
ofh.write(salt.utils.stringutils.to_str(timezone).strip())
ofh.write('\n')
return True
def zone_compare(timezone):
'''
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-link operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_localtime_path()):
return timezone == get_zone()
tzfile = _get_localtime_path()
zonepath = _get_zone_file(timezone)
try:
return filecmp.cmp(tzfile, zonepath, shallow=False)
except OSError as exc:
problematic_file = exc.filename
if problematic_file == zonepath:
raise SaltInvocationError(
'Can\'t find a local timezone "{0}"'.format(timezone))
elif problematic_file == tzfile:
raise CommandExecutionError(
'Failed to read {0} to determine current timezone: {1}'
.format(tzfile, exc.strerror))
raise
def _get_localtime_path():
if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:
return '/etc/natinst/share/localtime'
return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
if salt.utils.path.which('timedatectl'):
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
else:
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True
|
saltstack/salt
|
salt/modules/timezone.py
|
get_zone
|
python
|
def get_zone():
'''
Get current timezone (i.e. America/Denver)
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values can also be returned
'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
try:
return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
except AttributeError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
if __grains__['os'].lower() == 'centos':
return _get_zone_etc_localtime()
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
return _get_zone_sysconfig()
for family in ('Debian', 'Gentoo'):
if family in os_family:
return _get_zone_etc_timezone()
if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
return _get_zone_etc_localtime()
elif 'Solaris' in os_family:
return _get_zone_solaris()
elif 'AIX' in os_family:
return _get_zone_aix()
raise CommandExecutionError('Unable to get timezone')
|
Get current timezone (i.e. America/Denver)
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values can also be returned
'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L161-L207
|
[
"def _timedatectl():\n '''\n get the output of timedatectl\n '''\n ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)\n\n if ret['retcode'] != 0:\n msg = 'timedatectl failed: {0}'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret\n",
"def _get_zone_solaris():\n tzfile = '/etc/TIMEZONE'\n with salt.utils.files.fopen(tzfile, 'r') as fp_:\n for line in fp_:\n line = salt.utils.stringutils.to_unicode(line)\n if 'TZ=' in line:\n zonepart = line.rstrip('\\n').split('=')[-1]\n return zonepart.strip('\\'\"') or 'UTC'\n raise CommandExecutionError('Unable to get timezone from ' + tzfile)\n",
"def _get_zone_sysconfig():\n tzfile = '/etc/sysconfig/clock'\n with salt.utils.files.fopen(tzfile, 'r') as fp_:\n for line in fp_:\n line = salt.utils.stringutils.to_unicode(line)\n if re.match(r'^\\s*#', line):\n continue\n if 'ZONE' in line and '=' in line:\n zonepart = line.rstrip('\\n').split('=')[-1]\n return zonepart.strip('\\'\"') or 'UTC'\n raise CommandExecutionError('Unable to get timezone from ' + tzfile)\n",
"def _get_zone_etc_localtime():\n tzfile = _get_localtime_path()\n tzdir = '/usr/share/zoneinfo/'\n tzdir_len = len(tzdir)\n try:\n olson_name = os.path.normpath(\n os.path.join('/etc', os.readlink(tzfile))\n )\n if olson_name.startswith(tzdir):\n return olson_name[tzdir_len:]\n except OSError as exc:\n if exc.errno == errno.ENOENT:\n if 'FreeBSD' in __grains__['os_family']:\n return get_zonecode()\n raise CommandExecutionError(tzfile + ' does not exist')\n elif exc.errno == errno.EINVAL:\n if 'FreeBSD' in __grains__['os_family']:\n return get_zonecode()\n log.warning(\n '%s is not a symbolic link, attempting to match it '\n 'to zoneinfo files', tzfile\n )\n # Regular file. Try to match the hash.\n hash_type = __opts__.get('hash_type', 'md5')\n tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)\n # Not a link, just a copy of the tzdata file\n for root, dirs, files in salt.utils.path.os_walk(tzdir):\n for filename in files:\n full_path = os.path.join(root, filename)\n olson_name = full_path[tzdir_len:]\n if olson_name[0] in string.ascii_lowercase:\n continue\n if tzfile_hash == \\\n salt.utils.hashutils.get_hash(full_path, hash_type):\n return olson_name\n raise CommandExecutionError('Unable to determine timezone')\n",
"def _get_zone_etc_timezone():\n tzfile = '/etc/timezone'\n try:\n with salt.utils.files.fopen(tzfile, 'r') as fp_:\n return salt.utils.stringutils.to_unicode(fp_.read()).strip()\n except IOError as exc:\n raise CommandExecutionError(\n 'Problem reading timezone file {0}: {1}'\n .format(tzfile, exc.strerror)\n )\n",
"def _get_zone_aix():\n tzfile = '/etc/environment'\n with salt.utils.files.fopen(tzfile, 'r') as fp_:\n for line in fp_:\n line = salt.utils.stringutils.to_unicode(line)\n if 'TZ=' in line:\n zonepart = line.rstrip('\\n').split('=')[-1]\n return zonepart.strip('\\'\"') or 'UTC'\n raise CommandExecutionError('Unable to get timezone from ' + tzfile)\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.platform.is_windows():
return (False, 'The timezone execution module failed to load: '
'win_timezone.py should replace this module on Windows.'
'There was a problem loading win_timezone.py.')
if salt.utils.platform.is_darwin():
return (False, 'The timezone execution module failed to load: '
'mac_timezone.py should replace this module on macOS.'
'There was a problem loading mac_timezone.py.')
return __virtualname__
def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret
def _get_zone_solaris():
tzfile = '/etc/TIMEZONE'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_adjtime_timezone():
    '''
    Return the timezone in /etc/adjtime of the system clock
    '''
    adjtime_file = '/etc/adjtime'
    if os.path.exists(adjtime_file):
        # The third (last) line of /etc/adjtime holds 'UTC' or 'LOCAL'.
        cmd = ['tail', '-n', '1', adjtime_file]
        return __salt__['cmd.run'](cmd, python_shell=False)
    elif os.path.exists('/dev/rtc'):
        # An RTC exists but adjtime was never written: cannot determine mode.
        raise CommandExecutionError(
            'Unable to get hwclock timezone from ' + adjtime_file
        )
    else:
        # There is no RTC.
        return None
def _get_zone_sysconfig():
tzfile = '/etc/sysconfig/clock'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'ZONE' in line and '=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
tzfile = _get_localtime_path()
tzdir = '/usr/share/zoneinfo/'
tzdir_len = len(tzdir)
try:
olson_name = os.path.normpath(
os.path.join('/etc', os.readlink(tzfile))
)
if olson_name.startswith(tzdir):
return olson_name[tzdir_len:]
except OSError as exc:
if exc.errno == errno.ENOENT:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
raise CommandExecutionError(tzfile + ' does not exist')
elif exc.errno == errno.EINVAL:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
log.warning(
'%s is not a symbolic link, attempting to match it '
'to zoneinfo files', tzfile
)
# Regular file. Try to match the hash.
hash_type = __opts__.get('hash_type', 'md5')
tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
# Not a link, just a copy of the tzdata file
for root, dirs, files in salt.utils.path.os_walk(tzdir):
for filename in files:
full_path = os.path.join(root, filename)
olson_name = full_path[tzdir_len:]
if olson_name[0] in string.ascii_lowercase:
continue
if tzfile_hash == \
salt.utils.hashutils.get_hash(full_path, hash_type):
return olson_name
raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
tzfile = '/etc/timezone'
try:
with salt.utils.files.fopen(tzfile, 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError as exc:
raise CommandExecutionError(
'Problem reading timezone file {0}: {1}'
.format(tzfile, exc.strerror)
)
def _get_zone_aix():
    '''
    Return the timezone from the TZ= assignment in /etc/environment (AIX).

    An empty value falls back to 'UTC'.

    :raises CommandExecutionError: when no TZ= line is present.
    '''
    env_file = '/etc/environment'
    with salt.utils.files.fopen(env_file, 'r') as handle:
        for raw in handle:
            decoded = salt.utils.stringutils.to_unicode(raw)
            if 'TZ=' not in decoded:
                continue
            value = decoded.rstrip('\n').split('=')[-1]
            return value.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + env_file)
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    # Ask the system date utility for the abbreviated zone name.
    cmd = ['date', '+%Z']
    return __salt__['cmd.run'](cmd, python_shell=False)
def get_offset():
    '''
    Get current numeric timezone offset from UTC (i.e. -0700)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    # On everything but AIX the system ``date`` is used directly.
    if 'AIX' not in __grains__['os_family']:
        return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
    # AIX: fall back to the date binary shipped with Salt -- presumably
    # because the native AIX date does not support %z (TODO confirm).
    salt_path = '/opt/salt/bin/date'
    if not os.path.exists(salt_path):
        return 'date in salt binaries does not exist: {0}'.format(salt_path)
    return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
    '''
    Unlinks, then symlinks /etc/localtime to the set timezone.

    The timezone is crucial to several system processes, each of which SHOULD
    be restarted (for instance, whatever your system uses as its cron and
    syslog daemons). This will not be automagically done and must be done
    manually!

    :param timezone: Olson timezone name (e.g. ``America/Denver``); on AIX
        a Posix TZ string is also accepted.
    :return: ``True`` on success, ``False`` when AIX ``chtz`` fails, or an
        error string when the zoneinfo file does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values are also allowed, see below

    .. code-block:: bash

        salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
    '''
    # Prefer timedatectl (systemd) when available; failures are swallowed
    # so the file-based fallback below still runs.
    if salt.utils.path.which('timedatectl'):
        try:
            __salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
        except CommandExecutionError:
            pass
    # Solaris and AIX keep the zoneinfo database under /usr/share/lib.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
    else:
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
    # AIX is exempt from the existence check: the value may be a Posix TZ
    # string rather than the name of a zoneinfo file.
    if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
        return 'Zone does not exist: {0}'.format(zonepath)
    tzfile = _get_localtime_path()
    if os.path.exists(tzfile):
        os.unlink(tzfile)
    if 'Solaris' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
    elif 'AIX' in __grains__['os_family']:
        # timezone could be Olson or Posix
        curtzstring = get_zone()
        cmd = ['chtz', timezone]
        result = __salt__['cmd.retcode'](cmd, python_shell=False)
        if result == 0:
            return True
        # restore orig timezone, since AIX chtz failure sets UTC
        cmd = ['chtz', curtzstring]
        __salt__['cmd.retcode'](cmd, python_shell=False)
        return False
    else:
        os.symlink(zonepath, tzfile)
    # Keep distribution-specific config files in sync with the symlink.
    if 'RedHat' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
    elif 'Suse' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
    elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
        with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
            ofh.write(salt.utils.stringutils.to_str(timezone).strip())
            ofh.write('\n')
    return True
def zone_compare(timezone):
    '''
    Compares the given timezone name with the system timezone name.
    Checks the hash sum between the given timezone, and the one set in
    /etc/localtime. Returns True if names and hash sums match, and False if not.
    Mostly useful for running state checks.

    .. versionchanged:: 2016.3.0

    .. note::

        On Solaris-link operating systems only a string comparison is done.

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems only a string comparison is done.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    # Solaris/AIX may carry Posix TZ strings, so only compare names there.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        return timezone == get_zone()
    # FreeBSD may have no localtime file at all; fall back to name compare.
    if 'FreeBSD' in __grains__['os_family']:
        if not os.path.isfile(_get_localtime_path()):
            return timezone == get_zone()
    tzfile = _get_localtime_path()
    zonepath = _get_zone_file(timezone)
    try:
        # Byte-for-byte comparison of the two zoneinfo files.
        return filecmp.cmp(tzfile, zonepath, shallow=False)
    except OSError as exc:
        # exc.filename tells us which of the two files failed to open, so
        # "unknown zone" and "unreadable localtime" raise different errors.
        problematic_file = exc.filename
        if problematic_file == zonepath:
            raise SaltInvocationError(
                'Can\'t find a local timezone "{0}"'.format(timezone))
        elif problematic_file == tzfile:
            raise CommandExecutionError(
                'Failed to read {0} to determine current timezone: {1}'
                .format(tzfile, exc.strerror))
        raise
def _get_localtime_path():
    '''
    Return the path of the localtime file on this platform.

    NILinuxRT (nilrt) keeps it under /etc/natinst/share; everything else
    uses the conventional /etc/localtime.
    '''
    is_nilrt = ('NILinuxRT' in __grains__['os_family']
                and 'nilrt' in __grains__['lsb_distrib_id'])
    return '/etc/natinst/share/localtime' if is_nilrt else '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    if salt.utils.path.which('timedatectl'):
        # systemd: parse the "RTC in local TZ" line of timedatectl output.
        ret = _timedatectl()
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        os_family = __grains__['os_family']
        # These families record the clock mode in /etc/adjtime.
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # Original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError as exc:
                # Best-effort: missing/unreadable rcS falls through to adjtime.
                pass
            # Since Wheezy
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            # Prefer /etc/conf.d/hwclock only when /etc/adjtime is absent.
            if not os.path.exists('/etc/adjtime'):
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return line
                                if line == 'local':
                                    return 'LOCAL'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file)
                        )
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror)
                    )
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
        if 'AIX' in __grains__['os_family']:
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    :param clock: ``'UTC'`` or ``'localtime'`` (AIX and NILinuxRT accept
        only UTC).
    :return: ``True``/``False`` on command-based paths; ``True`` otherwise.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    if salt.utils.path.which('timedatectl'):
        cmd = ['timedatectl', 'set-local-rtc',
               'true' if clock == 'localtime' else 'false']
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    else:
        os_family = __grains__['os_family']
        if os_family in ('AIX', 'NILinuxRT'):
            # These platforms only support a UTC hardware clock here.
            if clock.lower() != 'utc':
                raise SaltInvocationError(
                    'UTC is the only permitted value'
                )
            return True
        timezone = get_zone()
        if 'Solaris' in __grains__['os_family']:
            if clock.lower() not in ('localtime', 'utc'):
                raise SaltInvocationError(
                    'localtime and UTC are the only permitted values'
                )
            if 'sparc' in __grains__['cpuarch']:
                raise SaltInvocationError(
                    'UTC is the only choice for SPARC architecture'
                )
            cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        # Re-create the /etc/localtime symlink for the current zone.
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                'Zone \'{0}\' does not exist'.format(zonepath)
            )
        os.unlink('/etc/localtime')
        os.symlink(zonepath, '/etc/localtime')
        if 'Arch' in __grains__['os_family']:
            # NOTE(review): 'timezonectl' looks like a typo for
            # 'timedatectl'; this branch only runs when timedatectl is not
            # on PATH, so verify whether it can ever succeed.
            cmd = ['timezonectl', 'set-local-rtc',
                   'true' if clock == 'localtime' else 'false']
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        elif 'RedHat' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
        elif 'Suse' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
        elif 'Debian' in __grains__['os_family']:
            if clock == 'UTC':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
            elif clock == 'localtime':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
        elif 'Gentoo' in __grains__['os_family']:
            if clock not in ('UTC', 'localtime'):
                raise SaltInvocationError(
                    'Only \'UTC\' and \'localtime\' are allowed'
                )
            if clock == 'localtime':
                clock = 'local'
            __salt__['file.sed'](
                '/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
    return True
|
saltstack/salt
|
salt/modules/timezone.py
|
get_offset
|
python
|
def get_offset():
'''
Get current numeric timezone offset from UCT (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
if 'AIX' not in __grains__['os_family']:
return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
salt_path = '/opt/salt/bin/date'
if not os.path.exists(salt_path):
return 'date in salt binaries does not exist: {0}'.format(salt_path)
return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
|
Get current numeric timezone offset from UTC (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L223-L241
| null |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
    '''
    Only work on POSIX-like systems

    Windows and macOS have dedicated replacement modules (win_timezone /
    mac_timezone), so refuse to load on those platforms.
    '''
    if salt.utils.platform.is_windows():
        return (False, 'The timezone execution module failed to load: '
                'win_timezone.py should replace this module on Windows.'
                'There was a problem loading win_timezone.py.')
    if salt.utils.platform.is_darwin():
        return (False, 'The timezone execution module failed to load: '
                'mac_timezone.py should replace this module on macOS.'
                'There was a problem loading mac_timezone.py.')
    return __virtualname__
def _timedatectl():
    '''
    get the output of timedatectl

    :return: the full ``cmd.run_all`` result dict
    :raises CommandExecutionError: when timedatectl exits non-zero
    '''
    ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if ret['retcode'] != 0:
        msg = 'timedatectl failed: {0}'.format(ret['stderr'])
        raise CommandExecutionError(msg)
    return ret
def _get_zone_solaris():
    '''
    Return the timezone from the TZ= assignment in /etc/TIMEZONE (Solaris).

    An empty value falls back to 'UTC'; raises CommandExecutionError when
    no TZ= line is present.
    '''
    tzfile = '/etc/TIMEZONE'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if 'TZ=' in line:
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_adjtime_timezone():
    '''
    Return the timezone in /etc/adjtime of the system clock

    The last line of /etc/adjtime holds the clock mode ('UTC' or 'LOCAL');
    ``tail -n 1`` returns it directly.  Returns None when the machine has
    no RTC at all, and raises when an RTC exists but adjtime is missing.
    '''
    adjtime_file = '/etc/adjtime'
    if os.path.exists(adjtime_file):
        cmd = ['tail', '-n', '1', adjtime_file]
        return __salt__['cmd.run'](cmd, python_shell=False)
    elif os.path.exists('/dev/rtc'):
        # RTC present but adjtime was never written -- the mode is unknown.
        raise CommandExecutionError(
            'Unable to get hwclock timezone from ' + adjtime_file
        )
    else:
        # There is no RTC.
        return None
def _get_zone_sysconfig():
tzfile = '/etc/sysconfig/clock'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'ZONE' in line and '=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
tzfile = _get_localtime_path()
tzdir = '/usr/share/zoneinfo/'
tzdir_len = len(tzdir)
try:
olson_name = os.path.normpath(
os.path.join('/etc', os.readlink(tzfile))
)
if olson_name.startswith(tzdir):
return olson_name[tzdir_len:]
except OSError as exc:
if exc.errno == errno.ENOENT:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
raise CommandExecutionError(tzfile + ' does not exist')
elif exc.errno == errno.EINVAL:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
log.warning(
'%s is not a symbolic link, attempting to match it '
'to zoneinfo files', tzfile
)
# Regular file. Try to match the hash.
hash_type = __opts__.get('hash_type', 'md5')
tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
# Not a link, just a copy of the tzdata file
for root, dirs, files in salt.utils.path.os_walk(tzdir):
for filename in files:
full_path = os.path.join(root, filename)
olson_name = full_path[tzdir_len:]
if olson_name[0] in string.ascii_lowercase:
continue
if tzfile_hash == \
salt.utils.hashutils.get_hash(full_path, hash_type):
return olson_name
raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
tzfile = '/etc/timezone'
try:
with salt.utils.files.fopen(tzfile, 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError as exc:
raise CommandExecutionError(
'Problem reading timezone file {0}: {1}'
.format(tzfile, exc.strerror)
)
def _get_zone_aix():
tzfile = '/etc/environment'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values can also be returned
        'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    if salt.utils.path.which('timedatectl'):
        # systemd: parse the "Time zone:" line of timedatectl output.
        ret = _timedatectl()
        for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
            try:
                return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
            except AttributeError:
                # Not the "Time zone:" line; keep scanning.
                pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        # No systemd: fall back to distribution-specific config files.
        if __grains__['os'].lower() == 'centos':
            return _get_zone_etc_localtime()
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse'):
            if family in os_family:
                return _get_zone_sysconfig()
        for family in ('Debian', 'Gentoo'):
            if family in os_family:
                return _get_zone_etc_timezone()
        if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
            return _get_zone_etc_localtime()
        elif 'Solaris' in os_family:
            return _get_zone_solaris()
        elif 'AIX' in os_family:
            return _get_zone_aix()
    raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever you system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values are also allowed, see below
.. code-block:: bash
salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
'''
if salt.utils.path.which('timedatectl'):
try:
__salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
except CommandExecutionError:
pass
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
else:
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
return 'Zone does not exist: {0}'.format(zonepath)
tzfile = _get_localtime_path()
if os.path.exists(tzfile):
os.unlink(tzfile)
if 'Solaris' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
elif 'AIX' in __grains__['os_family']:
# timezone could be Olson or Posix
curtzstring = get_zone()
cmd = ['chtz', timezone]
result = __salt__['cmd.retcode'](cmd, python_shell=False)
if result == 0:
return True
# restore orig timezone, since AIX chtz failure sets UTC
cmd = ['chtz', curtzstring]
__salt__['cmd.retcode'](cmd, python_shell=False)
return False
else:
os.symlink(zonepath, tzfile)
if 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
ofh.write(salt.utils.stringutils.to_str(timezone).strip())
ofh.write('\n')
return True
def zone_compare(timezone):
'''
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-link operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_localtime_path()):
return timezone == get_zone()
tzfile = _get_localtime_path()
zonepath = _get_zone_file(timezone)
try:
return filecmp.cmp(tzfile, zonepath, shallow=False)
except OSError as exc:
problematic_file = exc.filename
if problematic_file == zonepath:
raise SaltInvocationError(
'Can\'t find a local timezone "{0}"'.format(timezone))
elif problematic_file == tzfile:
raise CommandExecutionError(
'Failed to read {0} to determine current timezone: {1}'
.format(tzfile, exc.strerror))
raise
def _get_localtime_path():
if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:
return '/etc/natinst/share/localtime'
return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
if salt.utils.path.which('timedatectl'):
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
else:
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True
|
saltstack/salt
|
salt/modules/timezone.py
|
set_zone
|
python
|
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever you system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values are also allowed, see below
.. code-block:: bash
salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
'''
if salt.utils.path.which('timedatectl'):
try:
__salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
except CommandExecutionError:
pass
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
else:
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
return 'Zone does not exist: {0}'.format(zonepath)
tzfile = _get_localtime_path()
if os.path.exists(tzfile):
os.unlink(tzfile)
if 'Solaris' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
elif 'AIX' in __grains__['os_family']:
# timezone could be Olson or Posix
curtzstring = get_zone()
cmd = ['chtz', timezone]
result = __salt__['cmd.retcode'](cmd, python_shell=False)
if result == 0:
return True
# restore orig timezone, since AIX chtz failure sets UTC
cmd = ['chtz', curtzstring]
__salt__['cmd.retcode'](cmd, python_shell=False)
return False
else:
os.symlink(zonepath, tzfile)
if 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
ofh.write(salt.utils.stringutils.to_str(timezone).strip())
ofh.write('\n')
return True
|
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever your system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values are also allowed, see below
.. code-block:: bash
salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L244-L317
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n",
"def get_zone():\n '''\n Get current timezone (i.e. America/Denver)\n\n .. versionchanged:: 2016.11.4\n\n .. note::\n\n On AIX operating systems, Posix values can also be returned\n 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' timezone.get_zone\n '''\n if salt.utils.path.which('timedatectl'):\n ret = _timedatectl()\n\n for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\\n')):\n try:\n return re.match(r'Time ?zone:\\s+(\\S+)', line).group(1)\n except AttributeError:\n pass\n\n msg = ('Failed to parse timedatectl output: {0}\\n'\n 'Please file an issue with SaltStack').format(ret['stdout'])\n raise CommandExecutionError(msg)\n\n else:\n if __grains__['os'].lower() == 'centos':\n return _get_zone_etc_localtime()\n os_family = __grains__['os_family']\n for family in ('RedHat', 'Suse'):\n if family in os_family:\n return _get_zone_sysconfig()\n for family in ('Debian', 'Gentoo'):\n if family in os_family:\n return _get_zone_etc_timezone()\n if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):\n return _get_zone_etc_localtime()\n elif 'Solaris' in os_family:\n return _get_zone_solaris()\n elif 'AIX' in os_family:\n return _get_zone_aix()\n raise CommandExecutionError('Unable to get timezone')\n",
"def _get_localtime_path():\n if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:\n return '/etc/natinst/share/localtime'\n return '/etc/localtime'\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
    '''
    Load only on POSIX-like systems; Windows and macOS have their own
    platform-specific timezone modules.
    '''
    # Pair each unsupported-platform predicate with the exact message
    # explaining which replacement module should have loaded instead.
    _unsupported = (
        (salt.utils.platform.is_windows,
         'The timezone execution module failed to load: '
         'win_timezone.py should replace this module on Windows.'
         'There was a problem loading win_timezone.py.'),
        (salt.utils.platform.is_darwin,
         'The timezone execution module failed to load: '
         'mac_timezone.py should replace this module on macOS.'
         'There was a problem loading mac_timezone.py.'),
    )
    for check, reason in _unsupported:
        if check():
            return (False, reason)
    return __virtualname__
def _timedatectl():
    '''
    Run ``timedatectl`` and return the full ``cmd.run_all`` result dict.

    Raises CommandExecutionError when the command exits non-zero.
    '''
    ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if ret['retcode'] != 0:
        msg = 'timedatectl failed: {0}'.format(ret['stderr'])
        raise CommandExecutionError(msg)
    return ret
def _get_zone_solaris():
    '''
    Read the timezone name from Solaris' /etc/TIMEZONE file.
    '''
    path = '/etc/TIMEZONE'
    with salt.utils.files.fopen(path, 'r') as handle:
        for raw in handle:
            decoded = salt.utils.stringutils.to_unicode(raw)
            if 'TZ=' not in decoded:
                continue
            # Take everything after the last '=', drop surrounding quotes,
            # and fall back to UTC when the value is empty.
            value = decoded.rstrip('\n').split('=')[-1].strip('\'"')
            return value or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + path)
def _get_adjtime_timezone():
    '''
    Return the timezone in /etc/adjtime of the system clock

    Returns the last line of /etc/adjtime (e.g. 'UTC' or 'LOCAL'), or None
    when the machine has no RTC at all.  Raises CommandExecutionError when
    an RTC device exists but /etc/adjtime is missing.
    '''
    adjtime_file = '/etc/adjtime'
    if os.path.exists(adjtime_file):
        # The clock mode is recorded on the last line of the file.
        cmd = ['tail', '-n', '1', adjtime_file]
        return __salt__['cmd.run'](cmd, python_shell=False)
    elif os.path.exists('/dev/rtc'):
        raise CommandExecutionError(
            'Unable to get hwclock timezone from ' + adjtime_file
        )
    else:
        # There is no RTC.
        return None
def _get_zone_sysconfig():
    '''
    Read the timezone from /etc/sysconfig/clock (RedHat/Suse style).
    '''
    tzfile = '/etc/sysconfig/clock'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            # Skip comment lines.
            if re.match(r'^\s*#', line):
                continue
            if 'ZONE' in line and '=' in line:
                # Value after the last '=', unquoted; default to UTC when empty.
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
    '''
    Resolve the Olson timezone name from the /etc/localtime symlink.

    When the file is a regular copy rather than a symlink, fall back to
    hashing it against every file in the zoneinfo database.
    '''
    tzfile = _get_localtime_path()
    tzdir = '/usr/share/zoneinfo/'
    tzdir_len = len(tzdir)
    try:
        # Normal case: localtime is a symlink into the zoneinfo tree.
        olson_name = os.path.normpath(
            os.path.join('/etc', os.readlink(tzfile))
        )
        if olson_name.startswith(tzdir):
            return olson_name[tzdir_len:]
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # File missing entirely; FreeBSD can still report a zone code.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            raise CommandExecutionError(tzfile + ' does not exist')
        elif exc.errno == errno.EINVAL:
            # EINVAL from readlink() means tzfile is not a symlink.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            log.warning(
                '%s is not a symbolic link, attempting to match it '
                'to zoneinfo files', tzfile
            )
            # Regular file. Try to match the hash.
            hash_type = __opts__.get('hash_type', 'md5')
            tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
            # Not a link, just a copy of the tzdata file
            for root, dirs, files in salt.utils.path.os_walk(tzdir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    olson_name = full_path[tzdir_len:]
                    # Skip non-zone entries, which start lowercase
                    # (e.g. 'localtime', 'posixrules').
                    if olson_name[0] in string.ascii_lowercase:
                        continue
                    if tzfile_hash == \
                            salt.utils.hashutils.get_hash(full_path, hash_type):
                        return olson_name
    raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
    '''
    Read the timezone name from /etc/timezone (Debian/Gentoo style).
    '''
    tzfile = '/etc/timezone'
    try:
        with salt.utils.files.fopen(tzfile, 'r') as fp_:
            return salt.utils.stringutils.to_unicode(fp_.read()).strip()
    except IOError as exc:
        raise CommandExecutionError(
            'Problem reading timezone file {0}: {1}'
            .format(tzfile, exc.strerror)
        )
def _get_zone_aix():
    '''
    Read the TZ value from /etc/environment (AIX style).
    '''
    tzfile = '/etc/environment'
    with salt.utils.files.fopen(tzfile, 'r') as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line)
            if 'TZ=' in line:
                # Value after the last '=', unquoted; default to UTC when empty.
                zonepart = line.rstrip('\n').split('=')[-1]
                return zonepart.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values can also be returned
        'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        # Parse the 'Time zone:' line out of the timedatectl output.
        for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
            try:
                return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
            except AttributeError:
                # Line did not match; keep scanning.
                pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        # No timedatectl available: pick a platform-specific lookup.
        if __grains__['os'].lower() == 'centos':
            return _get_zone_etc_localtime()
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse'):
            if family in os_family:
                return _get_zone_sysconfig()
        for family in ('Debian', 'Gentoo'):
            if family in os_family:
                return _get_zone_etc_timezone()
        if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
            return _get_zone_etc_localtime()
        elif 'Solaris' in os_family:
            return _get_zone_solaris()
        elif 'AIX' in os_family:
            return _get_zone_aix()
        raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    # 'date +%Z' prints the abbreviated timezone name.
    return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def get_offset():
    '''
    Get current numeric timezone offset from UTC (i.e. -0700)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    if 'AIX' not in __grains__['os_family']:
        return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
    # AIX's native date(1) lacks '+%z'; use the GNU date bundled with salt.
    salt_path = '/opt/salt/bin/date'
    if not os.path.exists(salt_path):
        return 'date in salt binaries does not exist: {0}'.format(salt_path)
    return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def zone_compare(timezone):
    '''
    Compares the given timezone name with the system timezone name.
    Checks the hash sum between the given timezone, and the one set in
    /etc/localtime. Returns True if names and hash sums match, and False if not.
    Mostly useful for running state checks.

    .. versionchanged:: 2016.3.0

    .. note::

        On Solaris-link operating systems only a string comparison is done.

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems only a string comparison is done.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        # No reliable file to hash on these platforms; compare names only.
        return timezone == get_zone()
    if 'FreeBSD' in __grains__['os_family']:
        if not os.path.isfile(_get_localtime_path()):
            return timezone == get_zone()
    tzfile = _get_localtime_path()
    zonepath = _get_zone_file(timezone)
    try:
        # Byte-for-byte comparison (shallow=False) of the two zone files.
        return filecmp.cmp(tzfile, zonepath, shallow=False)
    except OSError as exc:
        # filecmp reports which of the two files failed to open.
        problematic_file = exc.filename
        if problematic_file == zonepath:
            raise SaltInvocationError(
                'Can\'t find a local timezone "{0}"'.format(timezone))
        elif problematic_file == tzfile:
            raise CommandExecutionError(
                'Failed to read {0} to determine current timezone: {1}'
                .format(tzfile, exc.strerror))
        raise
def _get_localtime_path():
    '''
    Return the path of the localtime file for this platform.
    '''
    # NI Linux RT (nilrt) keeps its localtime file under /etc/natinst/share.
    on_nilrt = ('NILinuxRT' in __grains__['os_family']
                and 'nilrt' in __grains__['lsb_distrib_id'])
    return '/etc/natinst/share/localtime' if on_nilrt else '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        # 'RTC in local TZ: yes/no' tells us the hardware clock mode.
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        # No timedatectl: fall back to platform-specific config files.
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # Original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError as exc:
                pass
            # Since Wheezy
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            if not os.path.exists('/etc/adjtime'):
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                # Normalize the quoted clock= value.
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return line
                                if line == 'local':
                                    return 'LOCAL'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file)
                        )
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror)
                    )
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
        if 'AIX' in __grains__['os_family']:
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    if salt.utils.path.which('timedatectl'):
        cmd = ['timedatectl', 'set-local-rtc',
               'true' if clock == 'localtime' else 'false']
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    else:
        os_family = __grains__['os_family']
        if os_family in ('AIX', 'NILinuxRT'):
            # These platforms only support a UTC hardware clock.
            if clock.lower() != 'utc':
                raise SaltInvocationError(
                    'UTC is the only permitted value'
                )
            return True
        timezone = get_zone()
        if 'Solaris' in __grains__['os_family']:
            if clock.lower() not in ('localtime', 'utc'):
                raise SaltInvocationError(
                    'localtime and UTC are the only permitted values'
                )
            if 'sparc' in __grains__['cpuarch']:
                raise SaltInvocationError(
                    'UTC is the only choice for SPARC architecture'
                )
            cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                'Zone \'{0}\' does not exist'.format(zonepath)
            )
        # Refresh /etc/localtime so it matches the current zone.
        os.unlink('/etc/localtime')
        os.symlink(zonepath, '/etc/localtime')
        if 'Arch' in __grains__['os_family']:
            # BUGFIX: the original invoked 'timezonectl', which is not a real
            # command; the systemd tool that toggles RTC mode is 'timedatectl'.
            cmd = ['timedatectl', 'set-local-rtc',
                   'true' if clock == 'localtime' else 'false']
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        elif 'RedHat' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
        elif 'Suse' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
        elif 'Debian' in __grains__['os_family']:
            if clock == 'UTC':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
            elif clock == 'localtime':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
        elif 'Gentoo' in __grains__['os_family']:
            if clock not in ('UTC', 'localtime'):
                raise SaltInvocationError(
                    'Only \'UTC\' and \'localtime\' are allowed'
                )
            # Gentoo's hwclock config spells localtime as 'local'.
            if clock == 'localtime':
                clock = 'local'
            __salt__['file.sed'](
                '/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
    return True
|
saltstack/salt
|
salt/modules/timezone.py
|
zone_compare
|
python
|
def zone_compare(timezone):
'''
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-link operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_localtime_path()):
return timezone == get_zone()
tzfile = _get_localtime_path()
zonepath = _get_zone_file(timezone)
try:
return filecmp.cmp(tzfile, zonepath, shallow=False)
except OSError as exc:
problematic_file = exc.filename
if problematic_file == zonepath:
raise SaltInvocationError(
'Can\'t find a local timezone "{0}"'.format(timezone))
elif problematic_file == tzfile:
raise CommandExecutionError(
'Failed to read {0} to determine current timezone: {1}'
.format(tzfile, exc.strerror))
raise
|
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-link operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L320-L365
|
[
"def get_zone():\n '''\n Get current timezone (i.e. America/Denver)\n\n .. versionchanged:: 2016.11.4\n\n .. note::\n\n On AIX operating systems, Posix values can also be returned\n 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' timezone.get_zone\n '''\n if salt.utils.path.which('timedatectl'):\n ret = _timedatectl()\n\n for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\\n')):\n try:\n return re.match(r'Time ?zone:\\s+(\\S+)', line).group(1)\n except AttributeError:\n pass\n\n msg = ('Failed to parse timedatectl output: {0}\\n'\n 'Please file an issue with SaltStack').format(ret['stdout'])\n raise CommandExecutionError(msg)\n\n else:\n if __grains__['os'].lower() == 'centos':\n return _get_zone_etc_localtime()\n os_family = __grains__['os_family']\n for family in ('RedHat', 'Suse'):\n if family in os_family:\n return _get_zone_sysconfig()\n for family in ('Debian', 'Gentoo'):\n if family in os_family:\n return _get_zone_etc_timezone()\n if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):\n return _get_zone_etc_localtime()\n elif 'Solaris' in os_family:\n return _get_zone_solaris()\n elif 'AIX' in os_family:\n return _get_zone_aix()\n raise CommandExecutionError('Unable to get timezone')\n",
"def _get_localtime_path():\n if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:\n return '/etc/natinst/share/localtime'\n return '/etc/localtime'\n",
"def _get_zone_file(timezone):\n return '/usr/share/zoneinfo/{0}'.format(timezone)\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.platform.is_windows():
return (False, 'The timezone execution module failed to load: '
'win_timezone.py should replace this module on Windows.'
'There was a problem loading win_timezone.py.')
if salt.utils.platform.is_darwin():
return (False, 'The timezone execution module failed to load: '
'mac_timezone.py should replace this module on macOS.'
'There was a problem loading mac_timezone.py.')
return __virtualname__
def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret
def _get_zone_solaris():
tzfile = '/etc/TIMEZONE'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_adjtime_timezone():
'''
Return the timezone in /etc/adjtime of the system clock
'''
adjtime_file = '/etc/adjtime'
if os.path.exists(adjtime_file):
cmd = ['tail', '-n', '1', adjtime_file]
return __salt__['cmd.run'](cmd, python_shell=False)
elif os.path.exists('/dev/rtc'):
raise CommandExecutionError(
'Unable to get hwclock timezone from ' + adjtime_file
)
else:
# There is no RTC.
return None
def _get_zone_sysconfig():
tzfile = '/etc/sysconfig/clock'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'ZONE' in line and '=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
tzfile = _get_localtime_path()
tzdir = '/usr/share/zoneinfo/'
tzdir_len = len(tzdir)
try:
olson_name = os.path.normpath(
os.path.join('/etc', os.readlink(tzfile))
)
if olson_name.startswith(tzdir):
return olson_name[tzdir_len:]
except OSError as exc:
if exc.errno == errno.ENOENT:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
raise CommandExecutionError(tzfile + ' does not exist')
elif exc.errno == errno.EINVAL:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
log.warning(
'%s is not a symbolic link, attempting to match it '
'to zoneinfo files', tzfile
)
# Regular file. Try to match the hash.
hash_type = __opts__.get('hash_type', 'md5')
tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
# Not a link, just a copy of the tzdata file
for root, dirs, files in salt.utils.path.os_walk(tzdir):
for filename in files:
full_path = os.path.join(root, filename)
olson_name = full_path[tzdir_len:]
if olson_name[0] in string.ascii_lowercase:
continue
if tzfile_hash == \
salt.utils.hashutils.get_hash(full_path, hash_type):
return olson_name
raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
tzfile = '/etc/timezone'
try:
with salt.utils.files.fopen(tzfile, 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError as exc:
raise CommandExecutionError(
'Problem reading timezone file {0}: {1}'
.format(tzfile, exc.strerror)
)
def _get_zone_aix():
tzfile = '/etc/environment'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
'''
Get current timezone (i.e. America/Denver)
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values can also be returned
'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
try:
return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
except AttributeError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
if __grains__['os'].lower() == 'centos':
return _get_zone_etc_localtime()
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
return _get_zone_sysconfig()
for family in ('Debian', 'Gentoo'):
if family in os_family:
return _get_zone_etc_timezone()
if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
return _get_zone_etc_localtime()
elif 'Solaris' in os_family:
return _get_zone_solaris()
elif 'AIX' in os_family:
return _get_zone_aix()
raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def get_offset():
'''
Get current numeric timezone offset from UCT (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
if 'AIX' not in __grains__['os_family']:
return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
salt_path = '/opt/salt/bin/date'
if not os.path.exists(salt_path):
return 'date in salt binaries does not exist: {0}'.format(salt_path)
return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
    '''
    Unlinks, then symlinks /etc/localtime to the set timezone.

    The timezone is crucial to several system processes, each of which SHOULD
    be restarted (for instance, whatever you system uses as its cron and
    syslog daemons). This will not be automagically done and must be done
    manually!

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values are also allowed, see below

    .. code-block:: bash

        salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
    '''
    if salt.utils.path.which('timedatectl'):
        try:
            __salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
        except CommandExecutionError:
            # Fall through to the manual symlink method below.
            pass
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
    else:
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
    if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
        return 'Zone does not exist: {0}'.format(zonepath)
    tzfile = _get_localtime_path()
    if os.path.exists(tzfile):
        os.unlink(tzfile)
    if 'Solaris' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
    elif 'AIX' in __grains__['os_family']:
        # timezone could be Olson or Posix
        curtzstring = get_zone()
        cmd = ['chtz', timezone]
        result = __salt__['cmd.retcode'](cmd, python_shell=False)
        if result == 0:
            return True
        # restore orig timezone, since AIX chtz failure sets UTC
        cmd = ['chtz', curtzstring]
        __salt__['cmd.retcode'](cmd, python_shell=False)
        return False
    else:
        os.symlink(zonepath, tzfile)
    if 'RedHat' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
    elif 'Suse' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
    elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
        with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
            ofh.write(salt.utils.stringutils.to_str(timezone).strip())
            ofh.write('\n')
    return True
def _get_localtime_path():
if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:
return '/etc/natinst/share/localtime'
return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
if salt.utils.path.which('timedatectl'):
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
else:
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True
|
saltstack/salt
|
salt/modules/timezone.py
|
get_hwclock
|
python
|
def get_hwclock():
'''
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in ret['stdout'].splitlines()):
if 'rtc in local tz' in line.lower():
try:
if line.split(':')[-1].strip().lower() == 'yes':
return 'localtime'
else:
return 'UTC'
except IndexError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse', 'NILinuxRT'):
if family in os_family:
return _get_adjtime_timezone()
if 'Debian' in __grains__['os_family']:
# Original way to look up hwclock on Debian-based systems
try:
with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'UTC=' in line:
is_utc = line.rstrip('\n').split('=')[-1].lower()
if is_utc == 'yes':
return 'UTC'
else:
return 'localtime'
except IOError as exc:
pass
# Since Wheezy
return _get_adjtime_timezone()
if 'Gentoo' in __grains__['os_family']:
if not os.path.exists('/etc/adjtime'):
offset_file = '/etc/conf.d/hwclock'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('clock='):
line = line.rstrip('\n')
line = line.split('=')[-1].strip('\'"')
if line == 'UTC':
return line
if line == 'local':
return 'LOCAL'
raise CommandExecutionError(
'Correct offset value not found in {0}'
.format(offset_file)
)
except IOError as exc:
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
return _get_adjtime_timezone()
if 'Solaris' in __grains__['os_family']:
offset_file = '/etc/rtc_config'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('zone_info=GMT'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
if 'AIX' in __grains__['os_family']:
offset_file = '/etc/environment'
try:
with salt.utils.files.fopen(offset_file, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('TZ=UTC'):
return 'UTC'
return 'localtime'
except IOError as exc:
if exc.errno == errno.ENOENT:
# offset file does not exist
return 'UTC'
raise CommandExecutionError(
'Problem reading offset file {0}: {1}'
.format(offset_file, exc.strerror)
)
|
Get current hardware clock setting (UTC or localtime)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_hwclock
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L378-L488
|
[
"def _timedatectl():\n '''\n get the output of timedatectl\n '''\n ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)\n\n if ret['retcode'] != 0:\n msg = 'timedatectl failed: {0}'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret\n",
"def _get_adjtime_timezone():\n '''\n Return the timezone in /etc/adjtime of the system clock\n '''\n adjtime_file = '/etc/adjtime'\n if os.path.exists(adjtime_file):\n cmd = ['tail', '-n', '1', adjtime_file]\n return __salt__['cmd.run'](cmd, python_shell=False)\n elif os.path.exists('/dev/rtc'):\n raise CommandExecutionError(\n 'Unable to get hwclock timezone from ' + adjtime_file\n )\n else:\n # There is no RTC.\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
    '''
    Load only on POSIX-like systems; Windows and macOS are served by their
    own platform-specific timezone modules.
    '''
    if salt.utils.platform.is_windows():
        reason = ('The timezone execution module failed to load: '
                  'win_timezone.py should replace this module on Windows.'
                  'There was a problem loading win_timezone.py.')
        return (False, reason)
    if salt.utils.platform.is_darwin():
        reason = ('The timezone execution module failed to load: '
                  'mac_timezone.py should replace this module on macOS.'
                  'There was a problem loading mac_timezone.py.')
        return (False, reason)
    return __virtualname__
def _timedatectl():
    '''
    Run ``timedatectl`` and return the full ``cmd.run_all`` result dict,
    raising CommandExecutionError on a non-zero exit code.
    '''
    result = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
    if result['retcode'] == 0:
        return result
    raise CommandExecutionError(
        'timedatectl failed: {0}'.format(result['stderr']))
def _get_zone_solaris():
    '''
    Read the timezone name from the ``TZ=`` entry in /etc/TIMEZONE (Solaris).
    '''
    path = '/etc/TIMEZONE'
    with salt.utils.files.fopen(path, 'r') as handle:
        for raw in handle:
            decoded = salt.utils.stringutils.to_unicode(raw)
            if 'TZ=' not in decoded:
                continue
            value = decoded.rstrip('\n').split('=')[-1]
            # An empty value falls back to UTC.
            return value.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + path)
def _get_adjtime_timezone():
    '''
    Return the timezone recorded for the hardware clock in /etc/adjtime,
    or None when the machine has no RTC at all.
    '''
    adjtime_file = '/etc/adjtime'
    if not os.path.exists(adjtime_file):
        if os.path.exists('/dev/rtc'):
            # An RTC is present but its mode was never recorded.
            raise CommandExecutionError(
                'Unable to get hwclock timezone from ' + adjtime_file
            )
        # There is no RTC.
        return None
    # The last line of /etc/adjtime names the clock mode (e.g. UTC/LOCAL).
    return __salt__['cmd.run'](['tail', '-n', '1', adjtime_file],
                               python_shell=False)
def _get_zone_sysconfig():
    '''
    Read the timezone from the ``ZONE=`` entry in /etc/sysconfig/clock
    (RedHat/Suse style).
    '''
    clock_file = '/etc/sysconfig/clock'
    with salt.utils.files.fopen(clock_file, 'r') as handle:
        for raw in handle:
            decoded = salt.utils.stringutils.to_unicode(raw)
            # Skip comment lines.
            if re.match(r'^\s*#', decoded):
                continue
            if 'ZONE' not in decoded or '=' not in decoded:
                continue
            value = decoded.rstrip('\n').split('=')[-1]
            # An empty value falls back to UTC.
            return value.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + clock_file)
def _get_zone_etc_localtime():
    # Resolve the Olson timezone name from the localtime file, which is
    # normally a symlink into the zoneinfo database.
    tzfile = _get_localtime_path()
    tzdir = '/usr/share/zoneinfo/'
    tzdir_len = len(tzdir)
    try:
        # Follow the symlink; it may be relative, so resolve against /etc.
        olson_name = os.path.normpath(
            os.path.join('/etc', os.readlink(tzfile))
        )
        if olson_name.startswith(tzdir):
            # Strip the zoneinfo prefix to get e.g. 'America/Denver'.
            return olson_name[tzdir_len:]
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # The localtime file is missing entirely.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            raise CommandExecutionError(tzfile + ' does not exist')
        elif exc.errno == errno.EINVAL:
            # EINVAL from readlink: the file exists but is not a symlink.
            if 'FreeBSD' in __grains__['os_family']:
                return get_zonecode()
            log.warning(
                '%s is not a symbolic link, attempting to match it '
                'to zoneinfo files', tzfile
            )
            # Regular file. Try to match the hash.
            hash_type = __opts__.get('hash_type', 'md5')
            tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
            # Not a link, just a copy of the tzdata file
            for root, dirs, files in salt.utils.path.os_walk(tzdir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    olson_name = full_path[tzdir_len:]
                    # Skip lowercase-leading entries (presumably non-zone
                    # files such as 'posixrules' or 'zone.tab' — TODO confirm)
                    if olson_name[0] in string.ascii_lowercase:
                        continue
                    if tzfile_hash == \
                            salt.utils.hashutils.get_hash(full_path, hash_type):
                        return olson_name
    raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
    '''
    Read the timezone name from /etc/timezone (Debian/Gentoo style).
    '''
    tz_path = '/etc/timezone'
    try:
        with salt.utils.files.fopen(tz_path, 'r') as handle:
            contents = salt.utils.stringutils.to_unicode(handle.read())
    except IOError as exc:
        raise CommandExecutionError(
            'Problem reading timezone file {0}: {1}'
            .format(tz_path, exc.strerror)
        )
    return contents.strip()
def _get_zone_aix():
    '''
    Read the timezone from the ``TZ=`` entry in /etc/environment (AIX).
    '''
    path = '/etc/environment'
    with salt.utils.files.fopen(path, 'r') as handle:
        for raw in handle:
            decoded = salt.utils.stringutils.to_unicode(raw)
            if 'TZ=' not in decoded:
                continue
            value = decoded.rstrip('\n').split('=')[-1]
            # An empty value falls back to UTC.
            return value.strip('\'"') or 'UTC'
    raise CommandExecutionError('Unable to get timezone from ' + path)
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values can also be returned
        'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    # Prefer systemd's timedatectl when available.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        # Scan the output for the 'Time zone:' line ('Timezone:' on some
        # versions, hence the optional space in the pattern).
        for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
            try:
                return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
            except AttributeError:
                # Non-matching line: re.match returned None.
                pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        # CentOS keeps the canonical zone only in /etc/localtime.
        if __grains__['os'].lower() == 'centos':
            return _get_zone_etc_localtime()
        os_family = __grains__['os_family']
        for family in ('RedHat', 'Suse'):
            if family in os_family:
                return _get_zone_sysconfig()
        for family in ('Debian', 'Gentoo'):
            if family in os_family:
                return _get_zone_etc_timezone()
        if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
            return _get_zone_etc_localtime()
        elif 'Solaris' in os_family:
            return _get_zone_solaris()
        elif 'AIX' in os_family:
            return _get_zone_aix()
    raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
    '''
    Get current timezone (i.e. PST, MDT, etc)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zonecode
    '''
    # 'date +%Z' prints the abbreviated timezone code.
    zonecode = __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
    return zonecode
def get_offset():
    '''
    Get current numeric timezone offset from UTC (i.e. -0700)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_offset
    '''
    if 'AIX' not in __grains__['os_family']:
        return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
    # AIX: the system date command does not support '+%z'; use the GNU
    # date shipped with the Salt bundle instead.
    salt_path = '/opt/salt/bin/date'
    if not os.path.exists(salt_path):
        return 'date in salt binaries does not exist: {0}'.format(salt_path)
    return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
    '''
    Unlinks, then symlinks /etc/localtime to the set timezone.

    The timezone is crucial to several system processes, each of which SHOULD
    be restarted (for instance, whatever you system uses as its cron and
    syslog daemons). This will not be automagically done and must be done
    manually!

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_zone 'America/Denver'

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems, Posix values are also allowed, see below

    .. code-block:: bash

        salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
    '''
    # Let systemd set the zone first when available; failures are ignored
    # and the file-based fallback below still runs.
    if salt.utils.path.which('timedatectl'):
        try:
            __salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
        except CommandExecutionError:
            pass
    # Solaris/AIX keep the zoneinfo database under a different prefix.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
    else:
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
    # On AIX a Posix TZ string is valid even without a zoneinfo file.
    if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
        return 'Zone does not exist: {0}'.format(zonepath)
    tzfile = _get_localtime_path()
    if os.path.exists(tzfile):
        os.unlink(tzfile)
    if 'Solaris' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
    elif 'AIX' in __grains__['os_family']:
        # timezone could be Olson or Posix
        curtzstring = get_zone()
        cmd = ['chtz', timezone]
        result = __salt__['cmd.retcode'](cmd, python_shell=False)
        if result == 0:
            return True
        # restore orig timezone, since AIX chtz failure sets UTC
        cmd = ['chtz', curtzstring]
        __salt__['cmd.retcode'](cmd, python_shell=False)
        return False
    else:
        # Re-point the localtime file at the chosen zoneinfo file.
        os.symlink(zonepath, tzfile)
    # Also persist the zone name in the distro-specific config file.
    if 'RedHat' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
    elif 'Suse' in __grains__['os_family']:
        __salt__['file.sed'](
            '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
    elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
        with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
            ofh.write(salt.utils.stringutils.to_str(timezone).strip())
            ofh.write('\n')
    return True
def zone_compare(timezone):
    '''
    Compares the given timezone name with the system timezone name.
    Checks the hash sum between the given timezone, and the one set in
    /etc/localtime. Returns True if names and hash sums match, and False if not.
    Mostly useful for running state checks.

    .. versionchanged:: 2016.3.0

    .. note::

        On Solaris-link operating systems only a string comparison is done.

    .. versionchanged:: 2016.11.4

    .. note::

        On AIX operating systems only a string comparison is done.

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.zone_compare 'America/Denver'
    '''
    # Platforms with no usable localtime file: compare names only.
    if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
        return timezone == get_zone()
    if 'FreeBSD' in __grains__['os_family']:
        if not os.path.isfile(_get_localtime_path()):
            return timezone == get_zone()
    tzfile = _get_localtime_path()
    zonepath = _get_zone_file(timezone)
    try:
        # Byte-for-byte comparison of the localtime file and the
        # candidate zoneinfo file.
        return filecmp.cmp(tzfile, zonepath, shallow=False)
    except OSError as exc:
        # Distinguish which of the two files was unreadable.
        problematic_file = exc.filename
        if problematic_file == zonepath:
            raise SaltInvocationError(
                'Can\'t find a local timezone "{0}"'.format(timezone))
        elif problematic_file == tzfile:
            raise CommandExecutionError(
                'Failed to read {0} to determine current timezone: {1}'
                .format(tzfile, exc.strerror))
        raise
def _get_localtime_path():
    '''
    Return the path of the localtime file, which lives in a non-standard
    location on NI Linux Real-Time.
    '''
    on_nilrt = ('NILinuxRT' in __grains__['os_family']
                and 'nilrt' in __grains__['lsb_distrib_id'])
    if on_nilrt:
        return '/etc/natinst/share/localtime'
    return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def set_hwclock(clock):
    '''
    Sets the hardware clock to be either UTC or localtime

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.set_hwclock UTC
    '''
    # Prefer systemd's timedatectl when available.
    if salt.utils.path.which('timedatectl'):
        cmd = ['timedatectl', 'set-local-rtc',
               'true' if clock == 'localtime' else 'false']
        return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
    else:
        os_family = __grains__['os_family']
        if os_family in ('AIX', 'NILinuxRT'):
            # These platforms only support a UTC hardware clock.
            if clock.lower() != 'utc':
                raise SaltInvocationError(
                    'UTC is the only permitted value'
                )
            return True
        timezone = get_zone()
        if 'Solaris' in __grains__['os_family']:
            if clock.lower() not in ('localtime', 'utc'):
                raise SaltInvocationError(
                    'localtime and UTC are the only permitted values'
                )
            if 'sparc' in __grains__['cpuarch']:
                raise SaltInvocationError(
                    'UTC is the only choice for SPARC architecture'
                )
            cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
        if not os.path.exists(zonepath):
            raise CommandExecutionError(
                'Zone \'{0}\' does not exist'.format(zonepath)
            )
        # Re-point /etc/localtime at the zoneinfo file for the current zone.
        os.unlink('/etc/localtime')
        os.symlink(zonepath, '/etc/localtime')
        if 'Arch' in __grains__['os_family']:
            # Bug fix: the command here was 'timezonectl', which does not
            # exist; the systemd tool is 'timedatectl' (as used above).
            cmd = ['timedatectl', 'set-local-rtc',
                   'true' if clock == 'localtime' else 'false']
            return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
        elif 'RedHat' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
        elif 'Suse' in __grains__['os_family']:
            __salt__['file.sed'](
                '/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
        elif 'Debian' in __grains__['os_family']:
            if clock == 'UTC':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
            elif clock == 'localtime':
                __salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
        elif 'Gentoo' in __grains__['os_family']:
            if clock not in ('UTC', 'localtime'):
                raise SaltInvocationError(
                    'Only \'UTC\' and \'localtime\' are allowed'
                )
            if clock == 'localtime':
                clock = 'local'
            __salt__['file.sed'](
                '/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
    return True
|
saltstack/salt
|
salt/modules/timezone.py
|
set_hwclock
|
python
|
def set_hwclock(clock):
'''
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
'''
if salt.utils.path.which('timedatectl'):
cmd = ['timedatectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
else:
os_family = __grains__['os_family']
if os_family in ('AIX', 'NILinuxRT'):
if clock.lower() != 'utc':
raise SaltInvocationError(
'UTC is the only permitted value'
)
return True
timezone = get_zone()
if 'Solaris' in __grains__['os_family']:
if clock.lower() not in ('localtime', 'utc'):
raise SaltInvocationError(
'localtime and UTC are the only permitted values'
)
if 'sparc' in __grains__['cpuarch']:
raise SaltInvocationError(
'UTC is the only choice for SPARC architecture'
)
cmd = ['rtc', '-z', 'GMT' if clock.lower() == 'utc' else timezone]
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath):
raise CommandExecutionError(
'Zone \'{0}\' does not exist'.format(zonepath)
)
os.unlink('/etc/localtime')
os.symlink(zonepath, '/etc/localtime')
if 'Arch' in __grains__['os_family']:
cmd = ['timezonectl', 'set-local-rtc',
'true' if clock == 'localtime' else 'false']
return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
elif 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family']:
if clock == 'UTC':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=yes')
elif clock == 'localtime':
__salt__['file.sed']('/etc/default/rcS', '^UTC=.*', 'UTC=no')
elif 'Gentoo' in __grains__['os_family']:
if clock not in ('UTC', 'localtime'):
raise SaltInvocationError(
'Only \'UTC\' and \'localtime\' are allowed'
)
if clock == 'localtime':
clock = 'local'
__salt__['file.sed'](
'/etc/conf.d/hwclock', '^clock=.*', 'clock="{0}"'.format(clock))
return True
|
Sets the hardware clock to be either UTC or localtime
CLI Example:
.. code-block:: bash
salt '*' timezone.set_hwclock UTC
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L491-L563
|
[
"def get_zone():\n '''\n Get current timezone (i.e. America/Denver)\n\n .. versionchanged:: 2016.11.4\n\n .. note::\n\n On AIX operating systems, Posix values can also be returned\n 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' timezone.get_zone\n '''\n if salt.utils.path.which('timedatectl'):\n ret = _timedatectl()\n\n for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\\n')):\n try:\n return re.match(r'Time ?zone:\\s+(\\S+)', line).group(1)\n except AttributeError:\n pass\n\n msg = ('Failed to parse timedatectl output: {0}\\n'\n 'Please file an issue with SaltStack').format(ret['stdout'])\n raise CommandExecutionError(msg)\n\n else:\n if __grains__['os'].lower() == 'centos':\n return _get_zone_etc_localtime()\n os_family = __grains__['os_family']\n for family in ('RedHat', 'Suse'):\n if family in os_family:\n return _get_zone_sysconfig()\n for family in ('Debian', 'Gentoo'):\n if family in os_family:\n return _get_zone_etc_timezone()\n if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):\n return _get_zone_etc_localtime()\n elif 'Solaris' in os_family:\n return _get_zone_solaris()\n elif 'AIX' in os_family:\n return _get_zone_aix()\n raise CommandExecutionError('Unable to get timezone')\n"
] |
# -*- coding: utf-8 -*-
'''
Module for managing timezone on POSIX-like systems.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import filecmp
import os
import errno
import logging
import re
import string
# Import salt libs
import salt.utils.files
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import SaltInvocationError, CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = 'timezone'
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.platform.is_windows():
return (False, 'The timezone execution module failed to load: '
'win_timezone.py should replace this module on Windows.'
'There was a problem loading win_timezone.py.')
if salt.utils.platform.is_darwin():
return (False, 'The timezone execution module failed to load: '
'mac_timezone.py should replace this module on macOS.'
'There was a problem loading mac_timezone.py.')
return __virtualname__
def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret
def _get_zone_solaris():
tzfile = '/etc/TIMEZONE'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_adjtime_timezone():
'''
Return the timezone in /etc/adjtime of the system clock
'''
adjtime_file = '/etc/adjtime'
if os.path.exists(adjtime_file):
cmd = ['tail', '-n', '1', adjtime_file]
return __salt__['cmd.run'](cmd, python_shell=False)
elif os.path.exists('/dev/rtc'):
raise CommandExecutionError(
'Unable to get hwclock timezone from ' + adjtime_file
)
else:
# There is no RTC.
return None
def _get_zone_sysconfig():
tzfile = '/etc/sysconfig/clock'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if re.match(r'^\s*#', line):
continue
if 'ZONE' in line and '=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def _get_zone_etc_localtime():
tzfile = _get_localtime_path()
tzdir = '/usr/share/zoneinfo/'
tzdir_len = len(tzdir)
try:
olson_name = os.path.normpath(
os.path.join('/etc', os.readlink(tzfile))
)
if olson_name.startswith(tzdir):
return olson_name[tzdir_len:]
except OSError as exc:
if exc.errno == errno.ENOENT:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
raise CommandExecutionError(tzfile + ' does not exist')
elif exc.errno == errno.EINVAL:
if 'FreeBSD' in __grains__['os_family']:
return get_zonecode()
log.warning(
'%s is not a symbolic link, attempting to match it '
'to zoneinfo files', tzfile
)
# Regular file. Try to match the hash.
hash_type = __opts__.get('hash_type', 'md5')
tzfile_hash = salt.utils.hashutils.get_hash(tzfile, hash_type)
# Not a link, just a copy of the tzdata file
for root, dirs, files in salt.utils.path.os_walk(tzdir):
for filename in files:
full_path = os.path.join(root, filename)
olson_name = full_path[tzdir_len:]
if olson_name[0] in string.ascii_lowercase:
continue
if tzfile_hash == \
salt.utils.hashutils.get_hash(full_path, hash_type):
return olson_name
raise CommandExecutionError('Unable to determine timezone')
def _get_zone_etc_timezone():
tzfile = '/etc/timezone'
try:
with salt.utils.files.fopen(tzfile, 'r') as fp_:
return salt.utils.stringutils.to_unicode(fp_.read()).strip()
except IOError as exc:
raise CommandExecutionError(
'Problem reading timezone file {0}: {1}'
.format(tzfile, exc.strerror)
)
def _get_zone_aix():
tzfile = '/etc/environment'
with salt.utils.files.fopen(tzfile, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if 'TZ=' in line:
zonepart = line.rstrip('\n').split('=')[-1]
return zonepart.strip('\'"') or 'UTC'
raise CommandExecutionError('Unable to get timezone from ' + tzfile)
def get_zone():
'''
Get current timezone (i.e. America/Denver)
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values can also be returned
'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
if salt.utils.path.which('timedatectl'):
ret = _timedatectl()
for line in (x.strip() for x in salt.utils.itertools.split(ret['stdout'], '\n')):
try:
return re.match(r'Time ?zone:\s+(\S+)', line).group(1)
except AttributeError:
pass
msg = ('Failed to parse timedatectl output: {0}\n'
'Please file an issue with SaltStack').format(ret['stdout'])
raise CommandExecutionError(msg)
else:
if __grains__['os'].lower() == 'centos':
return _get_zone_etc_localtime()
os_family = __grains__['os_family']
for family in ('RedHat', 'Suse'):
if family in os_family:
return _get_zone_sysconfig()
for family in ('Debian', 'Gentoo'):
if family in os_family:
return _get_zone_etc_timezone()
if os_family in ('FreeBSD', 'OpenBSD', 'NetBSD', 'NILinuxRT'):
return _get_zone_etc_localtime()
elif 'Solaris' in os_family:
return _get_zone_solaris()
elif 'AIX' in os_family:
return _get_zone_aix()
raise CommandExecutionError('Unable to get timezone')
def get_zonecode():
'''
Get current timezone (i.e. PST, MDT, etc)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zonecode
'''
return __salt__['cmd.run'](['date', '+%Z'], python_shell=False)
def get_offset():
'''
Get current numeric timezone offset from UCT (i.e. -0700)
CLI Example:
.. code-block:: bash
salt '*' timezone.get_offset
'''
if 'AIX' not in __grains__['os_family']:
return __salt__['cmd.run'](['date', '+%z'], python_shell=False)
salt_path = '/opt/salt/bin/date'
if not os.path.exists(salt_path):
return 'date in salt binaries does not exist: {0}'.format(salt_path)
return __salt__['cmd.run']([salt_path, '+%z'], python_shell=False)
def set_zone(timezone):
'''
Unlinks, then symlinks /etc/localtime to the set timezone.
The timezone is crucial to several system processes, each of which SHOULD
be restarted (for instance, whatever you system uses as its cron and
syslog daemons). This will not be automagically done and must be done
manually!
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems, Posix values are also allowed, see below
.. code-block:: bash
salt '*' timezone.set_zone 'CST6CDT,M3.2.0/2:00:00,M11.1.0/2:00:00'
'''
if salt.utils.path.which('timedatectl'):
try:
__salt__['cmd.run']('timedatectl set-timezone {0}'.format(timezone))
except CommandExecutionError:
pass
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
zonepath = '/usr/share/lib/zoneinfo/{0}'.format(timezone)
else:
zonepath = '/usr/share/zoneinfo/{0}'.format(timezone)
if not os.path.exists(zonepath) and 'AIX' not in __grains__['os_family']:
return 'Zone does not exist: {0}'.format(zonepath)
tzfile = _get_localtime_path()
if os.path.exists(tzfile):
os.unlink(tzfile)
if 'Solaris' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/default/init', '^TZ=.*', 'TZ={0}'.format(timezone))
elif 'AIX' in __grains__['os_family']:
# timezone could be Olson or Posix
curtzstring = get_zone()
cmd = ['chtz', timezone]
result = __salt__['cmd.retcode'](cmd, python_shell=False)
if result == 0:
return True
# restore orig timezone, since AIX chtz failure sets UTC
cmd = ['chtz', curtzstring]
__salt__['cmd.retcode'](cmd, python_shell=False)
return False
else:
os.symlink(zonepath, tzfile)
if 'RedHat' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^ZONE=.*', 'ZONE="{0}"'.format(timezone))
elif 'Suse' in __grains__['os_family']:
__salt__['file.sed'](
'/etc/sysconfig/clock', '^TIMEZONE=.*', 'TIMEZONE="{0}"'.format(timezone))
elif 'Debian' in __grains__['os_family'] or 'Gentoo' in __grains__['os_family']:
with salt.utils.files.fopen('/etc/timezone', 'w') as ofh:
ofh.write(salt.utils.stringutils.to_str(timezone).strip())
ofh.write('\n')
return True
def zone_compare(timezone):
'''
Compares the given timezone name with the system timezone name.
Checks the hash sum between the given timezone, and the one set in
/etc/localtime. Returns True if names and hash sums match, and False if not.
Mostly useful for running state checks.
.. versionchanged:: 2016.3.0
.. note::
On Solaris-link operating systems only a string comparison is done.
.. versionchanged:: 2016.11.4
.. note::
On AIX operating systems only a string comparison is done.
CLI Example:
.. code-block:: bash
salt '*' timezone.zone_compare 'America/Denver'
'''
if 'Solaris' in __grains__['os_family'] or 'AIX' in __grains__['os_family']:
return timezone == get_zone()
if 'FreeBSD' in __grains__['os_family']:
if not os.path.isfile(_get_localtime_path()):
return timezone == get_zone()
tzfile = _get_localtime_path()
zonepath = _get_zone_file(timezone)
try:
return filecmp.cmp(tzfile, zonepath, shallow=False)
except OSError as exc:
problematic_file = exc.filename
if problematic_file == zonepath:
raise SaltInvocationError(
'Can\'t find a local timezone "{0}"'.format(timezone))
elif problematic_file == tzfile:
raise CommandExecutionError(
'Failed to read {0} to determine current timezone: {1}'
.format(tzfile, exc.strerror))
raise
def _get_localtime_path():
if 'NILinuxRT' in __grains__['os_family'] and 'nilrt' in __grains__['lsb_distrib_id']:
return '/etc/natinst/share/localtime'
return '/etc/localtime'
def _get_zone_file(timezone):
return '/usr/share/zoneinfo/{0}'.format(timezone)
def get_hwclock():
    '''
    Get current hardware clock setting (UTC or localtime)

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_hwclock
    '''
    # Prefer systemd's timedatectl when available.
    if salt.utils.path.which('timedatectl'):
        ret = _timedatectl()
        # Look for the 'RTC in local TZ: yes/no' line in the output.
        for line in (x.strip() for x in ret['stdout'].splitlines()):
            if 'rtc in local tz' in line.lower():
                try:
                    if line.split(':')[-1].strip().lower() == 'yes':
                        return 'localtime'
                    else:
                        return 'UTC'
                except IndexError:
                    pass
        msg = ('Failed to parse timedatectl output: {0}\n'
               'Please file an issue with SaltStack').format(ret['stdout'])
        raise CommandExecutionError(msg)
    else:
        os_family = __grains__['os_family']
        # These families record the clock mode in /etc/adjtime.
        for family in ('RedHat', 'Suse', 'NILinuxRT'):
            if family in os_family:
                return _get_adjtime_timezone()
        if 'Debian' in __grains__['os_family']:
            # Original way to look up hwclock on Debian-based systems
            try:
                with salt.utils.files.fopen('/etc/default/rcS', 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if re.match(r'^\s*#', line):
                            continue
                        if 'UTC=' in line:
                            is_utc = line.rstrip('\n').split('=')[-1].lower()
                            if is_utc == 'yes':
                                return 'UTC'
                            else:
                                return 'localtime'
            except IOError as exc:
                # Fall through to the adjtime lookup below.
                pass
            # Since Wheezy
            return _get_adjtime_timezone()
        if 'Gentoo' in __grains__['os_family']:
            if not os.path.exists('/etc/adjtime'):
                # Older Gentoo records the mode in the hwclock init config.
                offset_file = '/etc/conf.d/hwclock'
                try:
                    with salt.utils.files.fopen(offset_file, 'r') as fp_:
                        for line in fp_:
                            line = salt.utils.stringutils.to_unicode(line)
                            if line.startswith('clock='):
                                line = line.rstrip('\n')
                                line = line.split('=')[-1].strip('\'"')
                                if line == 'UTC':
                                    return line
                                if line == 'local':
                                    return 'LOCAL'
                        raise CommandExecutionError(
                            'Correct offset value not found in {0}'
                            .format(offset_file)
                        )
                except IOError as exc:
                    raise CommandExecutionError(
                        'Problem reading offset file {0}: {1}'
                        .format(offset_file, exc.strerror)
                    )
            return _get_adjtime_timezone()
        if 'Solaris' in __grains__['os_family']:
            offset_file = '/etc/rtc_config'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('zone_info=GMT'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
        if 'AIX' in __grains__['os_family']:
            offset_file = '/etc/environment'
            try:
                with salt.utils.files.fopen(offset_file, 'r') as fp_:
                    for line in fp_:
                        line = salt.utils.stringutils.to_unicode(line)
                        if line.startswith('TZ=UTC'):
                            return 'UTC'
                    return 'localtime'
            except IOError as exc:
                if exc.errno == errno.ENOENT:
                    # offset file does not exist
                    return 'UTC'
                raise CommandExecutionError(
                    'Problem reading offset file {0}: {1}'
                    .format(offset_file, exc.strerror)
                )
|
saltstack/salt
|
salt/states/chocolatey.py
|
installed
|
python
|
def installed(name, version=None, source=None, force=False, pre_versions=False,
              install_args=None, override_args=False, force_x86=False,
              package_args=None, allow_multiple=False, execution_timeout=None):
    '''
    Installs a package if not already installed

    Args:

        name (str):
            The name of the package to be installed. Required.

        version (str):
            Install a specific version of the package. Defaults to latest
            version. If the version is different to the one installed then the
            specified version will be installed. Default is None.

        source (str):
            Chocolatey repository (directory, share or remote URL, feed).
            Defaults to the official Chocolatey feed. Default is None.

        force (bool):
            Reinstall the current version of an existing package. Do not use
            with ``allow_multiple``. Default is False.

        pre_versions (bool):
            Include pre-release packages. Default is False.

        install_args (str):
            Install arguments you want to pass to the installation process, i.e
            product key or feature list. Default is None.

        override_args (bool):
            Set to True if you want to override the original install arguments
            (for the native installer) in the package and use your own. When
            this is set to False install_args will be appended to the end of the
            default arguments. Default is False.

        force_x86 (bool):
            Force x86 (32bit) installation on 64 bit systems. Default is False.

        package_args (str):
            Arguments you want to pass to the package. Default is None.

        allow_multiple (bool):
            Allow mulitiple versions of the package to be installed. Do not use
            with ``force``. Does not work with all packages. Default is False.

            .. versionadded:: 2017.7.0

        execution_timeout (str):
            Chocolatey execution timeout value you want to pass to the
            installation process. Default is None.

    .. code-block:: yaml

        Installsomepackage:
          chocolatey.installed:
            - name: packagename
            - version: '12.04'
            - source: 'mychocolatey/source'
            - force: True
    '''
    # 'force' reinstalls in place while 'allow_multiple' installs
    # side-by-side; the two are mutually exclusive.
    if force and allow_multiple:
        raise SaltInvocationError(
            'Cannot use \'force\' in conjunction with \'allow_multiple\'')

    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Get list of currently installed packages
    pre_install = __salt__['chocolatey.list'](local_only=True)

    # Determine action
    # Package not installed
    if name.lower() not in [package.lower() for package in pre_install.keys()]:
        if version:
            ret['changes'] = {name: 'Version {0} will be installed'.format(version)}
        else:
            ret['changes'] = {name: 'Latest version will be installed'}

    # Package installed
    else:
        version_info = __salt__['chocolatey.version'](name=name,
                                                      check_remote=True,
                                                      source=source)

        # Recover the package's canonical (correctly-cased) name.
        full_name = name
        for pkg in version_info:
            if name.lower() == pkg.lower():
                full_name = pkg

        installed_version = version_info[full_name]['installed'][0]

        if version:
            if salt.utils.versions.compare(
                    ver1=installed_version, oper="==", ver2=version):
                # Requested version already present: only 'force' causes work.
                if force:
                    ret['changes'] = {
                        name: 'Version {0} will be reinstalled'.format(version)}
                    ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
                else:
                    ret['comment'] = '{0} {1} is already installed'.format(name, version)
                    if __opts__['test']:
                        ret['result'] = None
                    return ret
            else:
                # A different version is installed.
                if allow_multiple:
                    ret['changes'] = {
                        name: 'Version {0} will be installed side by side with '
                              'Version {1} if supported'.format(version, installed_version)
                    }
                    ret['comment'] = (
                        'Install {0} {1} side-by-side with {0} {2}'.format(
                            full_name, version, installed_version
                        )
                    )
                else:
                    ret['changes'] = {
                        name: 'Version {0} will be installed over Version {1}'.format(version, installed_version)
                    }
                    ret['comment'] = 'Install {0} {1} over {0} {2}'.format(
                        full_name, version, installed_version
                    )
                    # Chocolatey needs --force to replace an existing version.
                    force = True
        else:
            # No version requested: treat the installed version as the target.
            version = installed_version
            if force:
                ret['changes'] = {
                    name: 'Version {0} will be reinstalled'.format(version)}
                ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
            else:
                ret['comment'] = '{0} {1} is already installed'.format(name, version)
                if __opts__['test']:
                    ret['result'] = None
                return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'The installation was tested'
        return ret

    # Install the package
    result = __salt__['chocolatey.install'](name=name,
                                            version=version,
                                            source=source,
                                            force=force,
                                            pre_versions=pre_versions,
                                            install_args=install_args,
                                            override_args=override_args,
                                            force_x86=force_x86,
                                            package_args=package_args,
                                            allow_multiple=allow_multiple,
                                            execution_timeout=execution_timeout)
    # chocolatey.install returns its output text; failure is detected by
    # substring match rather than an exception.
    if 'Running chocolatey failed' not in result:
        ret['result'] = True
    else:
        ret['result'] = False

    if not ret['result']:
        ret['comment'] = 'Failed to install the package {0}'.format(name)

    # Get list of installed packages after 'chocolatey.install'
    post_install = __salt__['chocolatey.list'](local_only=True)

    # Report the actual package-list delta as the state's changes.
    ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)

    return ret
|
Installs a package if not already installed
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is different to the one installed then the
specified version will be installed. Default is None.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is None.
force (bool):
Reinstall the current version of an existing package. Do not use
with ``allow_multiple``. Default is False.
pre_versions (bool):
Include pre-release packages. Default is False.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is None.
override_args (bool):
Set to True if you want to override the original install arguments
(for the native installer) in the package and use your own. When
this is set to False install_args will be appended to the end of the
default arguments. Default is False.
force_x86 (bool):
Force x86 (32bit) installation on 64 bit systems. Default is False.
package_args (str):
Arguments you want to pass to the package. Default is None.
allow_multiple (bool):
Allow mulitiple versions of the package to be installed. Do not use
with ``force``. Does not work with all packages. Default is False.
.. versionadded:: 2017.7.0
execution_timeout (str):
Chocolatey execution timeout value you want to pass to the
installation process. Default is None.
.. code-block:: yaml
Installsomepackage:
chocolatey.installed:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
- force: True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/chocolatey.py#L30-L199
|
[
"def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False):\n '''\n Compares two version numbers. Accepts a custom function to perform the\n cmp-style version comparison, otherwise uses version_cmp().\n '''\n cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),\n '>=': (0, 1), '>': (1,)}\n if oper not in ('!=',) and oper not in cmp_map:\n log.error('Invalid operator \\'%s\\' for version comparison', oper)\n return False\n\n if cmp_func is None:\n cmp_func = version_cmp\n\n cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)\n if cmp_result is None:\n return False\n\n # Check if integer/long\n if not isinstance(cmp_result, numbers.Integral):\n log.error('The version comparison function did not return an '\n 'integer/long.')\n return False\n\n if oper == '!=':\n return cmp_result not in cmp_map['==']\n else:\n # Gracefully handle cmp_result not in (-1, 0, 1).\n if cmp_result < -1:\n cmp_result = -1\n elif cmp_result > 1:\n cmp_result = 1\n\n return cmp_result in cmp_map[oper]\n"
] |
# -*- coding: utf-8 -*-
'''
Manage Chocolatey package installs
.. versionadded:: 2016.3.0
.. note::
Chocolatey pulls data from the Chocolatey internet database to determine
current versions, find available versions, etc. This is normally a slow
operation and may be optimized by specifying a local, smaller chocolatey
repo.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import SaltInvocationError
def __virtual__():
'''
Load only if chocolatey is loaded
'''
return 'chocolatey' if 'chocolatey.install' in __salt__ else False
def uninstalled(name, version=None, uninstall_args=None, override_args=False):
'''
Uninstalls a package
name
The name of the package to be uninstalled
version
Uninstalls a specific version of the package. Defaults to latest
version installed.
uninstall_args
A list of uninstall arguments you want to pass to the uninstallation
process i.e product key or feature list
override_args
Set to true if you want to override the original uninstall arguments (
for the native uninstaller)in the package and use your own.
When this is set to False uninstall_args will be appended to the end of
the default arguments
.. code-block: yaml
Removemypackage:
chocolatey.uninstalled:
- name: mypackage
- version: '21.5'
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Get list of currently installed packages
pre_uninstall = __salt__['chocolatey.list'](local_only=True)
# Determine if package is installed
if name.lower() in [package.lower() for package in pre_uninstall.keys()]:
try:
ret['changes'] = {
name: '{0} version {1} will be removed'.format(
name, pre_uninstall[name][0]
)
}
except KeyError:
ret['changes'] = {name: '{0} will be removed'.format(name)}
else:
ret['comment'] = 'The package {0} is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The uninstall was tested'
return ret
# Uninstall the package
result = __salt__['chocolatey.uninstall'](name,
version,
uninstall_args,
override_args)
if 'Running chocolatey failed' not in result:
ret['result'] = True
else:
ret['result'] = False
if not ret['result']:
ret['comment'] = 'Failed to uninstall the package {0}'.format(name)
# Get list of installed packages after 'chocolatey.uninstall'
post_uninstall = __salt__['chocolatey.list'](local_only=True)
ret['changes'] = salt.utils.data.compare_dicts(pre_uninstall, post_uninstall)
return ret
def upgraded(name,
version=None,
source=None,
force=False,
pre_versions=False,
install_args=None,
override_args=False,
force_x86=False,
package_args=None):
'''
Upgrades a package. Will install the package if not installed.
.. versionadded:: 2018.3.0
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is greater than the one installed then the
specified version will be installed. Default is ``None``.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is ``None``.
force (bool):
``True`` will reinstall an existing package with the same version.
Default is ``False``.
pre_versions (bool):
``True`` will nclude pre-release packages. Default is ``False``.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is ``None``.
override_args (bool):
``True`` will override the original install arguments (for the
native installer) in the package and use those specified in
``install_args``. ``False`` will append install_args to the end of
the default arguments. Default is ``False``.
force_x86 (bool):
``True`` forces 32bit installation on 64 bit systems. Default is
``False``.
package_args (str):
Arguments you want to pass to the package. Default is ``None``.
.. code-block:: yaml
upgrade_some_package:
chocolatey.upgraded:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Get list of currently installed packages
pre_install = __salt__['chocolatey.list'](local_only=True)
# Determine if there are changes
# Package not installed
if name.lower() not in [package.lower() for package in pre_install.keys()]:
if version:
ret['changes'][name] = 'Version {0} will be installed'.format(version)
ret['comment'] = 'Install version {0}'.format(version)
else:
ret['changes'][name] = 'Latest version will be installed'
ret['comment'] = 'Install latest version'
# Package installed
else:
version_info = __salt__['chocolatey.version'](name, check_remote=True)
# Get the actual full name out of version_info
full_name = name
for pkg in version_info:
if name.lower() == pkg.lower():
full_name = pkg
installed_version = version_info[full_name]['installed'][0]
# If version is not passed, use available... if available is available
if not version:
if 'available' in version_info[full_name]:
version = version_info[full_name]['available'][0]
if version:
# If installed version and new version are the same
if salt.utils.versions.compare(
ver1=installed_version,
oper="==",
ver2=version):
if force:
ret['changes'][name] = 'Version {0} will be reinstalled'.format(version)
ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed'.format(
name, installed_version
)
else:
# If installed version is older than new version
if salt.utils.versions.compare(
ver1=installed_version, oper="<", ver2=version):
ret['changes'][name] = 'Version {0} will be upgraded to Version {1}'.format(
installed_version, version
)
ret['comment'] = 'Upgrade {0} {1} to {2}'.format(
full_name, installed_version, version
)
# If installed version is newer than new version
else:
ret['comment'] = (
'{0} {1} (newer) is already installed'.format(
name, installed_version
)
)
# Catch all for a condition where version is not passed and there is no
# available version
else:
ret['comment'] = 'No version found to install'
# Return if there are no changes to be made
if not ret['changes']:
return ret
# Return if running in test mode
if __opts__['test']:
ret['result'] = None
return ret
# Install the package
result = __salt__['chocolatey.upgrade'](name=name,
version=version,
source=source,
force=force,
pre_versions=pre_versions,
install_args=install_args,
override_args=override_args,
force_x86=force_x86,
package_args=package_args)
if 'Running chocolatey failed' not in result:
ret['comment'] = 'Package {0} upgraded successfully'.format(name)
ret['result'] = True
else:
ret['comment'] = 'Failed to upgrade the package {0}'.format(name)
ret['result'] = False
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__['chocolatey.list'](local_only=True)
# Prior to this, ret['changes'] would have contained expected changes,
# replace them with the actual changes now that we have completed the
# installation.
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
|
saltstack/salt
|
salt/states/chocolatey.py
|
uninstalled
|
python
|
def uninstalled(name, version=None, uninstall_args=None, override_args=False):
    '''
    Ensure a Chocolatey package is removed.

    name
        Package to remove.

    version
        Specific version to remove; defaults to the latest installed
        version.

    uninstall_args
        Extra arguments handed to the native uninstaller, such as a
        product key or feature list.

    override_args
        When True, ``uninstall_args`` replaces the package's original
        uninstall arguments instead of being appended to them.

    .. code-block: yaml

        Removemypackage:
          chocolatey.uninstalled:
            - name: mypackage
            - version: '21.5'
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    # Snapshot the locally installed packages before touching anything.
    pre_uninstall = __salt__['chocolatey.list'](local_only=True)

    # Guard clause: nothing to do when the package is absent.
    installed_names = [pkg.lower() for pkg in pre_uninstall]
    if name.lower() not in installed_names:
        ret['comment'] = 'The package {0} is not installed'.format(name)
        return ret

    # Describe the pending removal; fall back to a version-less message
    # when the listing does not key the package under ``name`` exactly.
    try:
        ret['changes'] = {
            name: '{0} version {1} will be removed'.format(
                name, pre_uninstall[name][0]
            )
        }
    except KeyError:
        ret['changes'] = {name: '{0} will be removed'.format(name)}

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'The uninstall was tested'
        return ret

    # Perform the actual removal.
    result = __salt__['chocolatey.uninstall'](name,
                                              version,
                                              uninstall_args,
                                              override_args)
    ret['result'] = 'Running chocolatey failed' not in result
    if not ret['result']:
        ret['comment'] = 'Failed to uninstall the package {0}'.format(name)

    # Report the real difference between the pre- and post-removal state.
    post_uninstall = __salt__['chocolatey.list'](local_only=True)
    ret['changes'] = salt.utils.data.compare_dicts(pre_uninstall, post_uninstall)
    return ret
|
Uninstalls a package
name
The name of the package to be uninstalled
version
Uninstalls a specific version of the package. Defaults to latest
version installed.
uninstall_args
A list of uninstall arguments you want to pass to the uninstallation
process i.e product key or feature list
override_args
Set to true if you want to override the original uninstall arguments (
for the native uninstaller) in the package and use your own.
When this is set to False uninstall_args will be appended to the end of
the default arguments
.. code-block: yaml
Removemypackage:
chocolatey.uninstalled:
- name: mypackage
- version: '21.5'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/chocolatey.py#L202-L278
| null |
# -*- coding: utf-8 -*-
'''
Manage Chocolatey package installs
.. versionadded:: 2016.3.0
.. note::
Chocolatey pulls data from the Chocolatey internet database to determine
current versions, find available versions, etc. This is normally a slow
operation and may be optimized by specifying a local, smaller chocolatey
repo.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
import salt.utils.data
import salt.utils.versions
from salt.exceptions import SaltInvocationError
def __virtual__():
'''
Load only if chocolatey is loaded
'''
return 'chocolatey' if 'chocolatey.install' in __salt__ else False
def installed(name, version=None, source=None, force=False, pre_versions=False,
install_args=None, override_args=False, force_x86=False,
package_args=None, allow_multiple=False, execution_timeout=None):
'''
Installs a package if not already installed
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is different to the one installed then the
specified version will be installed. Default is None.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is None.
force (bool):
Reinstall the current version of an existing package. Do not use
with ``allow_multiple``. Default is False.
pre_versions (bool):
Include pre-release packages. Default is False.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is None.
override_args (bool):
Set to True if you want to override the original install arguments
(for the native installer) in the package and use your own. When
this is set to False install_args will be appended to the end of the
default arguments. Default is False.
force_x86 (bool):
Force x86 (32bit) installation on 64 bit systems. Default is False.
package_args (str):
Arguments you want to pass to the package. Default is None.
allow_multiple (bool):
Allow mulitiple versions of the package to be installed. Do not use
with ``force``. Does not work with all packages. Default is False.
.. versionadded:: 2017.7.0
execution_timeout (str):
Chocolatey execution timeout value you want to pass to the
installation process. Default is None.
.. code-block:: yaml
Installsomepackage:
chocolatey.installed:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
- force: True
'''
if force and allow_multiple:
raise SaltInvocationError(
'Cannot use \'force\' in conjunction with \'allow_multiple\'')
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Get list of currently installed packages
pre_install = __salt__['chocolatey.list'](local_only=True)
# Determine action
# Package not installed
if name.lower() not in [package.lower() for package in pre_install.keys()]:
if version:
ret['changes'] = {name: 'Version {0} will be installed'.format(version)}
else:
ret['changes'] = {name: 'Latest version will be installed'}
# Package installed
else:
version_info = __salt__['chocolatey.version'](name=name,
check_remote=True,
source=source)
full_name = name
for pkg in version_info:
if name.lower() == pkg.lower():
full_name = pkg
installed_version = version_info[full_name]['installed'][0]
if version:
if salt.utils.versions.compare(
ver1=installed_version, oper="==", ver2=version):
if force:
ret['changes'] = {
name: 'Version {0} will be reinstalled'.format(version)}
ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed'.format(name, version)
if __opts__['test']:
ret['result'] = None
return ret
else:
if allow_multiple:
ret['changes'] = {
name: 'Version {0} will be installed side by side with '
'Version {1} if supported'.format(version, installed_version)
}
ret['comment'] = (
'Install {0} {1} side-by-side with {0} {2}'.format(
full_name, version, installed_version
)
)
else:
ret['changes'] = {
name: 'Version {0} will be installed over Version {1}'.format(version, installed_version)
}
ret['comment'] = 'Install {0} {1} over {0} {2}'.format(
full_name, version, installed_version
)
force = True
else:
version = installed_version
if force:
ret['changes'] = {
name: 'Version {0} will be reinstalled'.format(version)}
ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed'.format(name, version)
if __opts__['test']:
ret['result'] = None
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The installation was tested'
return ret
# Install the package
result = __salt__['chocolatey.install'](name=name,
version=version,
source=source,
force=force,
pre_versions=pre_versions,
install_args=install_args,
override_args=override_args,
force_x86=force_x86,
package_args=package_args,
allow_multiple=allow_multiple,
execution_timeout=execution_timeout)
if 'Running chocolatey failed' not in result:
ret['result'] = True
else:
ret['result'] = False
if not ret['result']:
ret['comment'] = 'Failed to install the package {0}'.format(name)
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__['chocolatey.list'](local_only=True)
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
def upgraded(name,
version=None,
source=None,
force=False,
pre_versions=False,
install_args=None,
override_args=False,
force_x86=False,
package_args=None):
'''
Upgrades a package. Will install the package if not installed.
.. versionadded:: 2018.3.0
Args:
name (str):
The name of the package to be installed. Required.
version (str):
Install a specific version of the package. Defaults to latest
version. If the version is greater than the one installed then the
specified version will be installed. Default is ``None``.
source (str):
Chocolatey repository (directory, share or remote URL, feed).
Defaults to the official Chocolatey feed. Default is ``None``.
force (bool):
``True`` will reinstall an existing package with the same version.
Default is ``False``.
pre_versions (bool):
``True`` will nclude pre-release packages. Default is ``False``.
install_args (str):
Install arguments you want to pass to the installation process, i.e
product key or feature list. Default is ``None``.
override_args (bool):
``True`` will override the original install arguments (for the
native installer) in the package and use those specified in
``install_args``. ``False`` will append install_args to the end of
the default arguments. Default is ``False``.
force_x86 (bool):
``True`` forces 32bit installation on 64 bit systems. Default is
``False``.
package_args (str):
Arguments you want to pass to the package. Default is ``None``.
.. code-block:: yaml
upgrade_some_package:
chocolatey.upgraded:
- name: packagename
- version: '12.04'
- source: 'mychocolatey/source'
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
# Get list of currently installed packages
pre_install = __salt__['chocolatey.list'](local_only=True)
# Determine if there are changes
# Package not installed
if name.lower() not in [package.lower() for package in pre_install.keys()]:
if version:
ret['changes'][name] = 'Version {0} will be installed'.format(version)
ret['comment'] = 'Install version {0}'.format(version)
else:
ret['changes'][name] = 'Latest version will be installed'
ret['comment'] = 'Install latest version'
# Package installed
else:
version_info = __salt__['chocolatey.version'](name, check_remote=True)
# Get the actual full name out of version_info
full_name = name
for pkg in version_info:
if name.lower() == pkg.lower():
full_name = pkg
installed_version = version_info[full_name]['installed'][0]
# If version is not passed, use available... if available is available
if not version:
if 'available' in version_info[full_name]:
version = version_info[full_name]['available'][0]
if version:
# If installed version and new version are the same
if salt.utils.versions.compare(
ver1=installed_version,
oper="==",
ver2=version):
if force:
ret['changes'][name] = 'Version {0} will be reinstalled'.format(version)
ret['comment'] = 'Reinstall {0} {1}'.format(full_name, version)
else:
ret['comment'] = '{0} {1} is already installed'.format(
name, installed_version
)
else:
# If installed version is older than new version
if salt.utils.versions.compare(
ver1=installed_version, oper="<", ver2=version):
ret['changes'][name] = 'Version {0} will be upgraded to Version {1}'.format(
installed_version, version
)
ret['comment'] = 'Upgrade {0} {1} to {2}'.format(
full_name, installed_version, version
)
# If installed version is newer than new version
else:
ret['comment'] = (
'{0} {1} (newer) is already installed'.format(
name, installed_version
)
)
# Catch all for a condition where version is not passed and there is no
# available version
else:
ret['comment'] = 'No version found to install'
# Return if there are no changes to be made
if not ret['changes']:
return ret
# Return if running in test mode
if __opts__['test']:
ret['result'] = None
return ret
# Install the package
result = __salt__['chocolatey.upgrade'](name=name,
version=version,
source=source,
force=force,
pre_versions=pre_versions,
install_args=install_args,
override_args=override_args,
force_x86=force_x86,
package_args=package_args)
if 'Running chocolatey failed' not in result:
ret['comment'] = 'Package {0} upgraded successfully'.format(name)
ret['result'] = True
else:
ret['comment'] = 'Failed to upgrade the package {0}'.format(name)
ret['result'] = False
# Get list of installed packages after 'chocolatey.install'
post_install = __salt__['chocolatey.list'](local_only=True)
# Prior to this, ret['changes'] would have contained expected changes,
# replace them with the actual changes now that we have completed the
# installation.
ret['changes'] = salt.utils.data.compare_dicts(pre_install, post_install)
return ret
|
saltstack/salt
|
salt/utils/job.py
|
store_job
|
python
|
def store_job(opts, load, event=None, mminion=None):
'''
Store job information using the configured master_job_cache
'''
# Generate EndTime
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(opts))
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(opts, load['id']):
return False
if mminion is None:
mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
job_cache = opts['master_job_cache']
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
try:
load['jid'] = mminion.returners[prep_fstr](nocache=load.get('nocache', False))
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(job_cache)
try:
mminion.returners[saveload_fstr](load['jid'], load)
except KeyError:
emsg = "Returner '{0}' does not support function save_load".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
elif salt.utils.jid.is_jid(load['jid']):
# Store the jid
jidstore_fstr = '{0}.prep_jid'.format(job_cache)
try:
mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
if event:
# If the return data is invalid, just ignore it
log.info('Got return from %s for job %s', load['id'], load['jid'])
event.fire_event(load,
salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
event.fire_ret_load(load)
# if you have a job_cache, or an ext_job_cache, don't write to
# the regular master cache
if not opts['job_cache'] or opts.get('ext_job_cache'):
return
# do not cache job results if explicitly requested
if load.get('jid') == 'nocache':
log.debug('Ignoring job return with jid for caching %s from %s',
load['jid'], load['id'])
return
# otherwise, write to the master cache
savefstr = '{0}.save_load'.format(job_cache)
getfstr = '{0}.get_load'.format(job_cache)
fstr = '{0}.returner'.format(job_cache)
updateetfstr = '{0}.update_endtime'.format(job_cache)
if 'fun' not in load and load.get('return', {}):
ret_ = load.get('return', {})
if 'fun' in ret_:
load.update({'fun': ret_['fun']})
if 'user' in ret_:
load.update({'user': ret_['user']})
# Try to reach returner methods
try:
savefstr_func = mminion.returners[savefstr]
getfstr_func = mminion.returners[getfstr]
fstr_func = mminion.returners[fstr]
except KeyError as error:
emsg = "Returner '{0}' does not support function {1}".format(job_cache, error)
log.error(emsg)
raise KeyError(emsg)
if job_cache != 'local_cache':
try:
mminion.returners[savefstr](load['jid'], load)
except KeyError as e:
log.error("Load does not contain 'jid': %s", e)
mminion.returners[fstr](load)
if (opts.get('job_cache_store_endtime')
and updateetfstr in mminion.returners):
mminion.returners[updateetfstr](load['jid'], endtime)
|
Store job information using the configured master_job_cache
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/job.py#L19-L116
|
[
"def valid_id(opts, id_):\n '''\n Returns if the passed id is valid\n '''\n try:\n if any(x in id_ for x in ('/', '\\\\', str('\\0'))):\n return False\n return bool(clean_path(opts['pki_dir'], id_))\n except (AttributeError, KeyError, TypeError, UnicodeDecodeError):\n return False\n",
"def gen_jid(opts=None):\n '''\n Generate a jid\n '''\n if opts is None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '\n 'This will be required starting in {version}.'\n )\n opts = {}\n global LAST_JID_DATETIME # pylint: disable=global-statement\n\n if opts.get('utc_jid', False):\n jid_dt = datetime.datetime.utcnow()\n else:\n jid_dt = datetime.datetime.now()\n if not opts.get('unique_jid', False):\n return '{0:%Y%m%d%H%M%S%f}'.format(jid_dt)\n if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:\n jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)\n LAST_JID_DATETIME = jid_dt\n return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid())\n",
"def jid_to_time(jid):\n '''\n Convert a salt job id into the time when the job was invoked\n '''\n jid = six.text_type(jid)\n if len(jid) != 20 and (len(jid) <= 21 or jid[20] != '_'):\n return ''\n year = jid[:4]\n month = jid[4:6]\n day = jid[6:8]\n hour = jid[8:10]\n minute = jid[10:12]\n second = jid[12:14]\n micro = jid[14:20]\n\n ret = '{0}, {1} {2} {3}:{4}:{5}.{6}'.format(year,\n months[int(month)],\n day,\n hour,\n minute,\n second,\n micro)\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
Functions for interacting with the job cache
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import Salt libs
import salt.minion
import salt.utils.jid
import salt.utils.event
import salt.utils.verify
log = logging.getLogger(__name__)
def store_minions(opts, jid, minions, mminion=None, syndic_id=None):
'''
Store additional minions matched on lower-level masters using the configured
master_job_cache
'''
if mminion is None:
mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
job_cache = opts['master_job_cache']
minions_fstr = '{0}.save_minions'.format(job_cache)
try:
mminion.returners[minions_fstr](jid, minions, syndic_id=syndic_id)
except KeyError:
raise KeyError(
'Returner \'{0}\' does not support function save_minions'.format(
job_cache
)
)
def get_retcode(ret):
'''
Determine a retcode for a given return
'''
retcode = 0
# if there is a dict with retcode, use that
if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
return ret['retcode']
# if its a boolean, False means 1
elif isinstance(ret, bool) and not ret:
return 1
return retcode
# vim:set et sts=4 ts=4 tw=80:
|
saltstack/salt
|
salt/utils/job.py
|
store_minions
|
python
|
def store_minions(opts, jid, minions, mminion=None, syndic_id=None):
    '''
    Record additional minions (matched on lower-level masters) for the
    given job id via the configured ``master_job_cache`` returner.

    Raises a ``KeyError`` when the configured returner does not expose a
    ``save_minions`` function.
    '''
    if mminion is None:
        # No master-minion handle supplied; build one without loading
        # states or renderers.
        mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
    cache_name = opts['master_job_cache']
    save_func = '{0}.save_minions'.format(cache_name)
    try:
        mminion.returners[save_func](jid, minions, syndic_id=syndic_id)
    except KeyError:
        raise KeyError(
            'Returner \'{0}\' does not support function save_minions'.format(
                cache_name
            )
        )
|
Store additional minions matched on lower-level masters using the configured
master_job_cache
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/job.py#L119-L136
| null |
# -*- coding: utf-8 -*-
'''
Functions for interacting with the job cache
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import Salt libs
import salt.minion
import salt.utils.jid
import salt.utils.event
import salt.utils.verify
log = logging.getLogger(__name__)
def store_job(opts, load, event=None, mminion=None):
'''
Store job information using the configured master_job_cache
'''
# Generate EndTime
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(opts))
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if not salt.utils.verify.valid_id(opts, load['id']):
return False
if mminion is None:
mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
job_cache = opts['master_job_cache']
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
load['arg'] = load.get('arg', load.get('fun_args', []))
load['tgt_type'] = 'glob'
load['tgt'] = load['id']
prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
try:
load['jid'] = mminion.returners[prep_fstr](nocache=load.get('nocache', False))
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(job_cache)
try:
mminion.returners[saveload_fstr](load['jid'], load)
except KeyError:
emsg = "Returner '{0}' does not support function save_load".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
elif salt.utils.jid.is_jid(load['jid']):
# Store the jid
jidstore_fstr = '{0}.prep_jid'.format(job_cache)
try:
mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
except KeyError:
emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
log.error(emsg)
raise KeyError(emsg)
if event:
# If the return data is invalid, just ignore it
log.info('Got return from %s for job %s', load['id'], load['jid'])
event.fire_event(load,
salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
event.fire_ret_load(load)
# if you have a job_cache, or an ext_job_cache, don't write to
# the regular master cache
if not opts['job_cache'] or opts.get('ext_job_cache'):
return
# do not cache job results if explicitly requested
if load.get('jid') == 'nocache':
log.debug('Ignoring job return with jid for caching %s from %s',
load['jid'], load['id'])
return
# otherwise, write to the master cache
savefstr = '{0}.save_load'.format(job_cache)
getfstr = '{0}.get_load'.format(job_cache)
fstr = '{0}.returner'.format(job_cache)
updateetfstr = '{0}.update_endtime'.format(job_cache)
if 'fun' not in load and load.get('return', {}):
ret_ = load.get('return', {})
if 'fun' in ret_:
load.update({'fun': ret_['fun']})
if 'user' in ret_:
load.update({'user': ret_['user']})
# Try to reach returner methods
try:
savefstr_func = mminion.returners[savefstr]
getfstr_func = mminion.returners[getfstr]
fstr_func = mminion.returners[fstr]
except KeyError as error:
emsg = "Returner '{0}' does not support function {1}".format(job_cache, error)
log.error(emsg)
raise KeyError(emsg)
if job_cache != 'local_cache':
try:
mminion.returners[savefstr](load['jid'], load)
except KeyError as e:
log.error("Load does not contain 'jid': %s", e)
mminion.returners[fstr](load)
if (opts.get('job_cache_store_endtime')
and updateetfstr in mminion.returners):
mminion.returners[updateetfstr](load['jid'], endtime)
def get_retcode(ret):
'''
Determine a retcode for a given return
'''
retcode = 0
# if there is a dict with retcode, use that
if isinstance(ret, dict) and ret.get('retcode', 0) != 0:
return ret['retcode']
# if its a boolean, False means 1
elif isinstance(ret, bool) and not ret:
return 1
return retcode
# vim:set et sts=4 ts=4 tw=80:
|
saltstack/salt
|
salt/utils/job.py
|
get_retcode
|
python
|
def get_retcode(ret):
    '''
    Derive an exit code from a job return value.

    A dict return with a non-zero ``retcode`` entry yields that entry, a
    boolean ``False`` yields 1, and every other value yields 0.
    '''
    # Dict returns carry their own exit status when it is non-zero.
    if isinstance(ret, dict):
        code = ret.get('retcode', 0)
        if code != 0:
            return code
    # A bare boolean False signals failure.
    if ret is False:
        return 1
    return 0
|
Determine a retcode for a given return
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/job.py#L139-L150
| null |
# -*- coding: utf-8 -*-
'''
Functions for interacting with the job cache
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
# Import Salt libs
import salt.minion
import salt.utils.jid
import salt.utils.event
import salt.utils.verify
log = logging.getLogger(__name__)
def store_job(opts, load, event=None, mminion=None):
    '''
    Store job information using the configured master_job_cache

    :param dict opts: Master configuration (``master_job_cache``,
        ``job_cache``, ``ext_job_cache``, ``job_cache_store_endtime`` are read)
    :param dict load: The job return payload; must contain ``return``,
        ``jid`` and ``id`` or the call is a no-op
    :param event: Optional event bus; when given, the return is fired as a
        ``job`` event before caching
    :param mminion: Optional pre-built MasterMinion; one is created if omitted
    :return: ``False`` when the load is invalid/rejected, otherwise ``None``
    :raises KeyError: when the configured returner lacks a required function
    '''
    # Generate EndTime
    endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid(opts))
    # If the return data is invalid, just ignore it
    if any(key not in load for key in ('return', 'jid', 'id')):
        return False
    if not salt.utils.verify.valid_id(opts, load['id']):
        return False
    if mminion is None:
        mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
    job_cache = opts['master_job_cache']
    if load['jid'] == 'req':
        # The minion is returning a standalone job, request a jobid
        load['arg'] = load.get('arg', load.get('fun_args', []))
        load['tgt_type'] = 'glob'
        load['tgt'] = load['id']
        prep_fstr = '{0}.prep_jid'.format(opts['master_job_cache'])
        try:
            load['jid'] = mminion.returners[prep_fstr](nocache=load.get('nocache', False))
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)
        # save the load, since we don't have it
        saveload_fstr = '{0}.save_load'.format(job_cache)
        try:
            mminion.returners[saveload_fstr](load['jid'], load)
        except KeyError:
            emsg = "Returner '{0}' does not support function save_load".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)
    elif salt.utils.jid.is_jid(load['jid']):
        # Store the jid
        jidstore_fstr = '{0}.prep_jid'.format(job_cache)
        try:
            mminion.returners[jidstore_fstr](False, passed_jid=load['jid'])
        except KeyError:
            emsg = "Returner '{0}' does not support function prep_jid".format(job_cache)
            log.error(emsg)
            raise KeyError(emsg)
    if event:
        # Fire the return on the event bus before any caching happens
        log.info('Got return from %s for job %s', load['id'], load['jid'])
        event.fire_event(load,
                         salt.utils.event.tagify([load['jid'], 'ret', load['id']], 'job'))
        event.fire_ret_load(load)
    # if you have a job_cache, or an ext_job_cache, don't write to
    # the regular master cache
    if not opts['job_cache'] or opts.get('ext_job_cache'):
        return
    # do not cache job results if explicitly requested
    if load.get('jid') == 'nocache':
        log.debug('Ignoring job return with jid for caching %s from %s',
                  load['jid'], load['id'])
        return
    # otherwise, write to the master cache
    savefstr = '{0}.save_load'.format(job_cache)
    getfstr = '{0}.get_load'.format(job_cache)
    fstr = '{0}.returner'.format(job_cache)
    updateetfstr = '{0}.update_endtime'.format(job_cache)
    # Backfill 'fun'/'user' at the top level from the nested return payload
    if 'fun' not in load and load.get('return', {}):
        ret_ = load.get('return', {})
        if 'fun' in ret_:
            load.update({'fun': ret_['fun']})
        if 'user' in ret_:
            load.update({'user': ret_['user']})
    # Try to reach returner methods
    # NOTE: the three assignments below are intentionally unused; the lookup
    # itself verifies that the returner implements all required functions
    # before any of them are invoked.
    try:
        savefstr_func = mminion.returners[savefstr]
        getfstr_func = mminion.returners[getfstr]
        fstr_func = mminion.returners[fstr]
    except KeyError as error:
        emsg = "Returner '{0}' does not support function {1}".format(job_cache, error)
        log.error(emsg)
        raise KeyError(emsg)
    # local_cache saves the load itself; other returners need an explicit save
    if job_cache != 'local_cache':
        try:
            mminion.returners[savefstr](load['jid'], load)
        except KeyError as e:
            log.error("Load does not contain 'jid': %s", e)
    mminion.returners[fstr](load)
    if (opts.get('job_cache_store_endtime')
            and updateetfstr in mminion.returners):
        mminion.returners[updateetfstr](load['jid'], endtime)
def store_minions(opts, jid, minions, mminion=None, syndic_id=None):
    '''
    Persist extra minions (matched on lower-level masters) for a job via the
    configured master_job_cache returner.

    :raises KeyError: if the returner has no ``save_minions`` function
    '''
    if mminion is None:
        mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
    cache_name = opts['master_job_cache']
    save_fn = '{0}.save_minions'.format(cache_name)
    try:
        mminion.returners[save_fn](jid, minions, syndic_id=syndic_id)
    except KeyError:
        raise KeyError(
            "Returner '{0}' does not support function save_minions".format(
                cache_name
            )
        )
# vim:set et sts=4 ts=4 tw=80:
|
saltstack/salt
|
salt/beacons/aix_account.py
|
beacon
|
python
|
def beacon(config):
    '''
    Report AIX accounts that are locked out because of too many (3+)
    invalid login attempts.

    .. code-block:: yaml

        beacons:
          aix_account:
            user: ALL
            interval: 120
    '''
    # shadow.login_failures returns the accounts over the lockout threshold
    locked = __salt__['shadow.login_failures'](config['user'])
    return [{'accounts': locked}]
|
Checks for locked accounts due to too many invalid login attempts, 3 or higher.
.. code-block:: yaml
beacons:
aix_account:
user: ALL
interval: 120
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/aix_account.py#L43-L63
| null |
# -*- coding: utf-8 -*-
'''
Beacon to fire event when we notice a AIX user is locked due to many failed login attempts.
.. versionadded:: 2018.3.0
:depends: none
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals
import logging
log = logging.getLogger(__name__)
__virtualname__ = 'aix_account'
def __virtual__():
    '''
    Load this beacon only when the minion's kernel grain reports AIX.
    '''
    if __grains__['kernel'] != 'AIX':
        return (False, 'The aix_account beacon module failed to load: '
                       'only available on AIX systems.')
    return __virtualname__
def validate(config):
    '''
    Validate the beacon configuration.

    Returns a ``(bool, message)`` tuple: ``True`` with a success message when
    the config is a dict containing a ``user`` key, ``False`` with the reason
    otherwise.
    '''
    # The beacon config must be a mapping, not a list or scalar.
    if not isinstance(config, dict):
        return False, 'Configuration for aix_account beacon must be a dict.'
    # A user (or the literal ALL) is mandatory.
    if 'user' not in config:
        return False, ('Configuration for aix_account beacon must '
                       'include a user or ALL for all users.')
    return True, 'Valid beacon configuration'
|
saltstack/salt
|
salt/modules/opsgenie.py
|
post_data
|
python
|
def post_data(api_key=None, name='OpsGenie Execution Module', reason=None,
              action_type=None):
    '''
    Post data to OpsGenie. It's designed for Salt's Event Reactor.
    After configuring the sls reaction file as shown above, you can trigger the
    module with your designated tag (og-tag in this case).
    CLI Example:
    .. code-block:: bash
        salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}'
    Required parameters:
    api_key
        It's the API Key you've copied while adding integration in OpsGenie.
    reason
        It will be used as alert's default message in OpsGenie.
    action_type
        OpsGenie supports the default values Create/Close for action_type. You
        can customize this field with OpsGenie's custom actions for other
        purposes like adding notes or acknowledging alerts.
    Optional parameters:
    name
        It will be used as alert's alias. If you want to use the close
        functionality you must provide name field for both states like in
        this case.
    :return: ``(status_code, body_text)`` tuple from the HTTP response
    :raises salt.exceptions.SaltInvocationError: if api_key or reason is None
    '''
    if api_key is None or reason is None:
        raise salt.exceptions.SaltInvocationError(
            'API Key or Reason cannot be None.')
    data = dict()
    data['alias'] = name
    data['message'] = reason
    # data['actions'] = action_type
    # Attach minion grain metadata so the alert carries host context.
    data['cpuModel'] = __grains__['cpu_model']
    data['cpuArch'] = __grains__['cpuarch']
    data['fqdn'] = __grains__['fqdn']
    data['host'] = __grains__['host']
    data['id'] = __grains__['id']
    data['kernel'] = __grains__['kernel']
    data['kernelRelease'] = __grains__['kernelrelease']
    data['master'] = __grains__['master']
    data['os'] = __grains__['os']
    data['saltPath'] = __grains__['saltpath']
    data['saltVersion'] = __grains__['saltversion']
    data['username'] = __grains__['username']
    data['uuid'] = __grains__['uuid']
    log.debug('Below data will be posted:\n%s', data)
    log.debug('API Key: %s \t API Endpoint: %s', api_key, API_ENDPOINT)
    # 'Create' opens a new alert; any other action_type closes the alert
    # whose alias matches ``name`` via the /close endpoint.
    if action_type == "Create":
        response = requests.post(
            url=API_ENDPOINT,
            data=salt.utils.json.dumps(data),
            headers={'Content-Type': 'application/json',
                     'Authorization': 'GenieKey ' + api_key})
    else:
        response = requests.post(
            url=API_ENDPOINT + "/" + name + "/close?identifierType=alias",
            data=salt.utils.json.dumps(data),
            headers={'Content-Type': 'application/json',
                     'Authorization': 'GenieKey ' + api_key})
    return response.status_code, response.text
|
Post data to OpsGenie. It's designed for Salt's Event Reactor.
After configuring the sls reaction file as shown above, you can trigger the
module with your designated tag (og-tag in this case).
CLI Example:
.. code-block:: bash
salt-call event.send 'og-tag' '{"reason" : "Overheating CPU!"}'
Required parameters:
api_key
It's the API Key you've copied while adding integration in OpsGenie.
reason
It will be used as alert's default message in OpsGenie.
action_type
OpsGenie supports the default values Create/Close for action_type. You
can customize this field with OpsGenie's custom actions for other
purposes like adding notes or acknowledging alerts.
Optional parameters:
name
It will be used as alert's alias. If you want to use the close
functionality you must provide name field for both states like in
this case.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opsgenie.py#L37-L109
|
[
"def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n"
] |
# -*- coding: utf-8 -*-
'''
Module for sending data to OpsGenie
.. versionadded:: 2018.3.0
:configuration: This module can be used in Reactor System for
posting data to OpsGenie as a remote-execution function.
For example:
.. code-block:: yaml
opsgenie_event_poster:
local.opsgenie.post_data:
- tgt: 'salt-minion'
- kwarg:
name: event.reactor
api_key: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
reason: {{ data['data']['reason'] }}
action_type: Create
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import requests
# Import Salt libs
import salt.exceptions
import salt.utils.json
API_ENDPOINT = "https://api.opsgenie.com/v2/alerts"
log = logging.getLogger(__name__)
|
saltstack/salt
|
salt/states/macpackage.py
|
installed
|
python
|
def installed(name, target="LocalSystem", dmg=False, store=False, app=False, mpkg=False, user=None, onlyif=None,
              unless=None, force=False, allow_untrusted=False, version_check=None):
    '''
    Install a Mac OS Package from a pkg or dmg file, if given a dmg file it
    will first be mounted in a temporary location
    name
        The pkg or dmg file to install
    target
        The location in which to install the package. This can be a path or LocalSystem
    dmg
        Is the given file a dmg file?
    store
        Should the pkg be installed as if it was from the Mac OS Store?
    app
        Is the file a .app? If so then we'll just copy that to /Applications/ or the given
        target
    mpkg
        Is the file a .mpkg? If so then we'll check all of the .pkg files found are installed
    user
        Name of the user performing the unless or onlyif checks
    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true
    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false
    force
        Force the package to be installed even if its already been found installed
    allow_untrusted
        Allow the installation of untrusted packages
    version_check
        The command and version that we want to check against, the version number can use regex.
    .. code-block:: yaml
        version_check: python --version_check=2.7.[0-9]
    '''
    # Standard state return structure; 'comment' is appended to as we go.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    found = []
    installing = []
    real_pkg = name
    # Check onlyif, unless first
    run_check_cmd_kwargs = {'runas': user, 'python_shell': True}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    # _mod_run_check returns a skip-dict when onlyif/unless veto the run,
    # or True when the state should proceed.
    cret = _mod_run_check(run_check_cmd_kwargs, onlyif, unless)
    if isinstance(cret, dict):
        ret.update(cret)
        return ret
    # Check version info
    # version_check has the form '<command>=<expected regex>'; skip the
    # install entirely when the command output already matches.
    if version_check is not None:
        split = version_check.split("=")
        if len(split) == 2:
            version_cmd = split[0]
            expected_version = split[1]
            try:
                version_out = __salt__['cmd.run'](version_cmd, output_loglevel="quiet", ignore_retcode=True)
                version_out = version_out.strip()
            except CommandExecutionError:
                version_out = ""
            if re.match(expected_version, version_out) is not None:
                ret['comment'] += "Version already matches {0}".format(expected_version)
                return ret
            else:
                ret['comment'] += "Version {0} doesn't match {1}. ".format(version_out, expected_version)
    if app and target == "LocalSystem":
        target = "/Applications/"
    # Mount the dmg first
    mount_point = None
    if dmg:
        out, mount_point = __salt__['macpackage.mount'](name)
        if 'attach failed' in out:
            ret['result'] = False
            ret['comment'] += 'Unable to mount {0}'.format(name)
            return ret
        # The real payload lives inside the mounted image; glob for it.
        if app:
            real_pkg = mount_point + "/*.app"
        elif mpkg:
            real_pkg = mount_point + "/*.mpkg"
        else:
            real_pkg = mount_point + "/*.pkg"
    try:
        # Check if we have already installed this
        if app:
            if dmg:
                # Run with python shell due to the wildcard
                cmd = 'ls -d *.app'
                out = __salt__['cmd.run'](cmd, cwd=mount_point, python_shell=True)
                if '.app' not in out:
                    ret['result'] = False
                    ret['comment'] += 'Unable to find .app in {0}'.format(mount_point)
                    return ret
                else:
                    pkg_ids = out.split("\n")
            else:
                pkg_ids = [os.path.basename(name)]
                mount_point = os.path.dirname(name)
            # Without any guards configured, dedupe against what already
            # exists on disk; otherwise install every discovered app.
            if onlyif is None and unless is None and version_check is None:
                for p in pkg_ids:
                    if target[-4:] == ".app":
                        install_dir = target
                    else:
                        install_dir = os.path.join(target, p)
                    if os.path.exists(install_dir) and force is False:
                        found.append(p)
                    else:
                        installing.append(p)
            else:
                installing = pkg_ids
        else:
            # pkg/mpkg path: compare package ids against the installed set.
            installed_pkgs = __salt__['macpackage.installed_pkgs']()
            if mpkg:
                pkg_ids = __salt__['macpackage.get_mpkg_ids'](real_pkg)
            else:
                pkg_ids = __salt__['macpackage.get_pkg_id'](real_pkg)
            if pkg_ids:
                for p in pkg_ids:
                    if p in installed_pkgs and force is False:
                        found.append(p)
                    else:
                        installing.append(p)
        # Everything already present: nothing to do.
        if len(pkg_ids) == len(found):
            return ret
        if app:
            # Record f_pkg as a failed install in the state return.
            def failed_pkg(f_pkg):
                ret['result'] = False
                ret['comment'] += '{0} failed to install: {1}'.format(name, out)
                if 'failed' in ret['changes']:
                    ret['changes']['failed'].append(f_pkg)
                else:
                    ret['changes']['failed'] = [f_pkg]
            # NOTE(review): this loop rebinds the ``app`` parameter; harmless
            # here because the flag is no longer read, but confusing.
            for app in installing:
                try:
                    log.info('Copying %s to %s', app, target)
                    out = __salt__['macpackage.install_app'](os.path.join(mount_point, app), target)
                    if out:
                        failed_pkg(app)
                    else:
                        ret['comment'] += '{0} installed'.format(app)
                        if 'installed' in ret['changes']:
                            ret['changes']['installed'].append(app)
                        else:
                            ret['changes']['installed'] = [app]
                except OSError:
                    failed_pkg(app)
        else:
            out = __salt__['macpackage.install'](real_pkg, target, store, allow_untrusted)
            if out['retcode'] != 0:
                ret['result'] = False
                ret['comment'] += '. {0} failed to install: {1}'.format(name, out)
            else:
                ret['comment'] += '{0} installed'.format(name)
                ret['changes']['installed'] = installing
    finally:
        if dmg:
            # Unmount to be kind
            __salt__['macpackage.unmount'](mount_point)
    return ret
|
Install a Mac OS Package from a pkg or dmg file, if given a dmg file it
will first be mounted in a temporary location
name
The pkg or dmg file to install
target
The location in which to install the package. This can be a path or LocalSystem
dmg
Is the given file a dmg file?
store
Should the pkg be installed as if it was from the Mac OS Store?
app
Is the file a .app? If so then we'll just copy that to /Applications/ or the given
target
mpkg
Is the file a .mpkg? If so then we'll check all of the .pkg files found are installed
user
Name of the user performing the unless or onlyif checks
onlyif
A command to run as a check, run the named command only if the command
passed to the ``onlyif`` option returns true
unless
A command to run as a check, only run the named command if the command
passed to the ``unless`` option returns false
force
Force the package to be installed even if its already been found installed
allow_untrusted
Allow the installation of untrusted packages
version_check
The command and version that we want to check against, the version number can use regex.
.. code-block:: yaml
version_check: python --version_check=2.7.[0-9]
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/macpackage.py#L49-L245
|
[
"def _mod_run_check(cmd_kwargs, onlyif, unless):\n '''\n Execute the onlyif and unless logic.\n Return a result dict if:\n * onlyif failed (onlyif != 0)\n * unless succeeded (unless == 0)\n else return True\n '''\n if onlyif:\n if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:\n return {'comment': 'onlyif condition is false',\n 'skip_watch': True,\n 'result': True}\n\n if unless:\n if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:\n return {'comment': 'unless condition is true',\n 'skip_watch': True,\n 'result': True}\n\n # No reason to stop, return True\n return True\n",
"def failed_pkg(f_pkg):\n ret['result'] = False\n ret['comment'] += '{0} failed to install: {1}'.format(name, out)\n\n if 'failed' in ret['changes']:\n ret['changes']['failed'].append(f_pkg)\n else:\n ret['changes']['failed'] = [f_pkg]\n"
] |
# -*- coding: utf-8 -*-
'''
Installing of mac pkg files
===========================
Install any kind of pkg, dmg or app file on macOS:
.. code-block:: yaml
/mnt/test.pkg:
macpackage.installed:
- store: True
/mnt/test.dmg:
macpackage.installed:
- dmg: True
/mnt/xcode.dmg:
macpackage.installed:
- dmg: True
- app: True
- target: /Applications/Xcode.app
- version_check: xcodebuild -version=Xcode 7.1\\n.*7B91b
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import re
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "macpackage"
def __virtual__():
'''
Only work on Mac OS
'''
if salt.utils.platform.is_darwin():
return __virtualname__
return False
def _mod_run_check(cmd_kwargs, onlyif, unless):
'''
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
'''
if onlyif:
if __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
return {'comment': 'onlyif condition is false',
'skip_watch': True,
'result': True}
if unless:
if __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
return {'comment': 'unless condition is true',
'skip_watch': True,
'result': True}
# No reason to stop, return True
return True
|
saltstack/salt
|
salt/states/macpackage.py
|
_mod_run_check
|
python
|
def _mod_run_check(cmd_kwargs, onlyif, unless):
    '''
    Evaluate the onlyif/unless guard commands.

    Returns a skip-result dict when the state must not run (onlyif exited
    nonzero, or unless exited zero); returns True when it may proceed.
    '''
    # onlyif: state runs only when the command succeeds (retcode 0)
    if onlyif and __salt__['cmd.retcode'](onlyif, **cmd_kwargs) != 0:
        return {'comment': 'onlyif condition is false',
                'skip_watch': True,
                'result': True}
    # unless: state runs only when the command fails (nonzero retcode)
    if unless and __salt__['cmd.retcode'](unless, **cmd_kwargs) == 0:
        return {'comment': 'unless condition is true',
                'skip_watch': True,
                'result': True}
    # No reason to stop, return True
    return True
|
Execute the onlyif and unless logic.
Return a result dict if:
* onlyif failed (onlyif != 0)
* unless succeeded (unless == 0)
else return True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/macpackage.py#L248-L269
| null |
# -*- coding: utf-8 -*-
'''
Installing of mac pkg files
===========================
Install any kind of pkg, dmg or app file on macOS:
.. code-block:: yaml
/mnt/test.pkg:
macpackage.installed:
- store: True
/mnt/test.dmg:
macpackage.installed:
- dmg: True
/mnt/xcode.dmg:
macpackage.installed:
- dmg: True
- app: True
- target: /Applications/Xcode.app
- version_check: xcodebuild -version=Xcode 7.1\\n.*7B91b
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
import os
import re
# Import Salt libs
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__virtualname__ = "macpackage"
def __virtual__():
    '''
    Load this state module only on macOS.
    '''
    return __virtualname__ if salt.utils.platform.is_darwin() else False
def installed(name, target="LocalSystem", dmg=False, store=False, app=False, mpkg=False, user=None, onlyif=None,
              unless=None, force=False, allow_untrusted=False, version_check=None):
    '''
    Install a Mac OS Package from a pkg or dmg file, if given a dmg file it
    will first be mounted in a temporary location
    name
        The pkg or dmg file to install
    target
        The location in which to install the package. This can be a path or LocalSystem
    dmg
        Is the given file a dmg file?
    store
        Should the pkg be installed as if it was from the Mac OS Store?
    app
        Is the file a .app? If so then we'll just copy that to /Applications/ or the given
        target
    mpkg
        Is the file a .mpkg? If so then we'll check all of the .pkg files found are installed
    user
        Name of the user performing the unless or onlyif checks
    onlyif
        A command to run as a check, run the named command only if the command
        passed to the ``onlyif`` option returns true
    unless
        A command to run as a check, only run the named command if the command
        passed to the ``unless`` option returns false
    force
        Force the package to be installed even if its already been found installed
    allow_untrusted
        Allow the installation of untrusted packages
    version_check
        The command and version that we want to check against, the version number can use regex.
    .. code-block:: yaml
        version_check: python --version_check=2.7.[0-9]
    '''
    # Standard state return structure; 'comment' is appended to as we go.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    found = []
    installing = []
    real_pkg = name
    # Check onlyif, unless first
    run_check_cmd_kwargs = {'runas': user, 'python_shell': True}
    if 'shell' in __grains__:
        run_check_cmd_kwargs['shell'] = __grains__['shell']
    # _mod_run_check returns a skip-dict when onlyif/unless veto the run,
    # or True when the state should proceed.
    cret = _mod_run_check(run_check_cmd_kwargs, onlyif, unless)
    if isinstance(cret, dict):
        ret.update(cret)
        return ret
    # Check version info
    # version_check has the form '<command>=<expected regex>'; skip the
    # install entirely when the command output already matches.
    if version_check is not None:
        split = version_check.split("=")
        if len(split) == 2:
            version_cmd = split[0]
            expected_version = split[1]
            try:
                version_out = __salt__['cmd.run'](version_cmd, output_loglevel="quiet", ignore_retcode=True)
                version_out = version_out.strip()
            except CommandExecutionError:
                version_out = ""
            if re.match(expected_version, version_out) is not None:
                ret['comment'] += "Version already matches {0}".format(expected_version)
                return ret
            else:
                ret['comment'] += "Version {0} doesn't match {1}. ".format(version_out, expected_version)
    if app and target == "LocalSystem":
        target = "/Applications/"
    # Mount the dmg first
    mount_point = None
    if dmg:
        out, mount_point = __salt__['macpackage.mount'](name)
        if 'attach failed' in out:
            ret['result'] = False
            ret['comment'] += 'Unable to mount {0}'.format(name)
            return ret
        # The real payload lives inside the mounted image; glob for it.
        if app:
            real_pkg = mount_point + "/*.app"
        elif mpkg:
            real_pkg = mount_point + "/*.mpkg"
        else:
            real_pkg = mount_point + "/*.pkg"
    try:
        # Check if we have already installed this
        if app:
            if dmg:
                # Run with python shell due to the wildcard
                cmd = 'ls -d *.app'
                out = __salt__['cmd.run'](cmd, cwd=mount_point, python_shell=True)
                if '.app' not in out:
                    ret['result'] = False
                    ret['comment'] += 'Unable to find .app in {0}'.format(mount_point)
                    return ret
                else:
                    pkg_ids = out.split("\n")
            else:
                pkg_ids = [os.path.basename(name)]
                mount_point = os.path.dirname(name)
            # Without any guards configured, dedupe against what already
            # exists on disk; otherwise install every discovered app.
            if onlyif is None and unless is None and version_check is None:
                for p in pkg_ids:
                    if target[-4:] == ".app":
                        install_dir = target
                    else:
                        install_dir = os.path.join(target, p)
                    if os.path.exists(install_dir) and force is False:
                        found.append(p)
                    else:
                        installing.append(p)
            else:
                installing = pkg_ids
        else:
            # pkg/mpkg path: compare package ids against the installed set.
            installed_pkgs = __salt__['macpackage.installed_pkgs']()
            if mpkg:
                pkg_ids = __salt__['macpackage.get_mpkg_ids'](real_pkg)
            else:
                pkg_ids = __salt__['macpackage.get_pkg_id'](real_pkg)
            if pkg_ids:
                for p in pkg_ids:
                    if p in installed_pkgs and force is False:
                        found.append(p)
                    else:
                        installing.append(p)
        # Everything already present: nothing to do.
        if len(pkg_ids) == len(found):
            return ret
        if app:
            # Record f_pkg as a failed install in the state return.
            def failed_pkg(f_pkg):
                ret['result'] = False
                ret['comment'] += '{0} failed to install: {1}'.format(name, out)
                if 'failed' in ret['changes']:
                    ret['changes']['failed'].append(f_pkg)
                else:
                    ret['changes']['failed'] = [f_pkg]
            # NOTE(review): this loop rebinds the ``app`` parameter; harmless
            # here because the flag is no longer read, but confusing.
            for app in installing:
                try:
                    log.info('Copying %s to %s', app, target)
                    out = __salt__['macpackage.install_app'](os.path.join(mount_point, app), target)
                    if out:
                        failed_pkg(app)
                    else:
                        ret['comment'] += '{0} installed'.format(app)
                        if 'installed' in ret['changes']:
                            ret['changes']['installed'].append(app)
                        else:
                            ret['changes']['installed'] = [app]
                except OSError:
                    failed_pkg(app)
        else:
            out = __salt__['macpackage.install'](real_pkg, target, store, allow_untrusted)
            if out['retcode'] != 0:
                ret['result'] = False
                ret['comment'] += '. {0} failed to install: {1}'.format(name, out)
            else:
                ret['comment'] += '{0} installed'.format(name)
                ret['changes']['installed'] = installing
    finally:
        if dmg:
            # Unmount to be kind
            __salt__['macpackage.unmount'](mount_point)
    return ret
|
saltstack/salt
|
salt/modules/rest_service.py
|
status
|
python
|
def status(name, sig=None):
    '''
    Return the status for a service via rest_sample.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.
    .. versionadded:: 2015.8.0
    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)
    Args:
        name (str): The name of the service to check
        sig (str): Not implemented
    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.status <service name>
    '''
    is_glob = bool(re.search(r'\*|\?|\[.+\]', name))
    # Expand a glob against the full service list; otherwise check one name.
    services = fnmatch.filter(get_all(), name) if is_glob else [name]
    results = {
        svc: __proxy__['rest_sample.service_status'](svc)['comment'] == 'running'
        for svc in services
    }
    return results if is_glob else results[name]
|
Return the status for a service via rest_sample.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionadded:: 2015.8.0
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Not implemented
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rest_service.py#L128-L169
|
[
"def get_all():\n '''\n Return a list of all available services\n\n .. versionadded:: 2015.8.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.get_all\n '''\n proxy_fn = 'rest_sample.service_list'\n return __proxy__[proxy_fn]()\n"
] |
# -*- coding: utf-8 -*-
'''
Provide the service module for the proxy-minion REST sample
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import logging
import fnmatch
import re
# Import Salt libs
import salt.utils.platform
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list'
}
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
    '''
    Load only on a proxy minion of type rest_sample.
    '''
    try:
        is_rest_proxy = (salt.utils.platform.is_proxy()
                         and __opts__['proxy']['proxytype'] == 'rest_sample')
    except KeyError:
        # The proxy config is missing the expected keys
        return (
            False,
            'The rest_service execution module failed to load. Check the '
            'proxy key in pillar.'
        )
    if is_rest_proxy:
        return __virtualname__
    return (
        False,
        'The rest_service execution module failed to load: only works on a '
        'rest_sample proxy minion.'
    )
def get_all():
    '''
    Return the list of all services known to the rest_sample proxy.
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_all
    '''
    return __proxy__['rest_sample.service_list']()
def list_():
    '''
    Alias for :py:func:`get_all`: return every available service.
    .. versionadded: 2015.8.1
    CLI Example:
    .. code-block:: bash
        salt '*' service.list
    '''
    # Delegate to get_all; exposed as 'service.list' via __func_alias__.
    return get_all()
def start(name, sig=None):
    '''
    Start the specified service on the rest_sample
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' service.start <service name>
    '''
    return __proxy__['rest_sample.service_start'](name)
def stop(name, sig=None):
    '''
    Stop the specified service on the rest_sample
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' service.stop <service name>
    '''
    return __proxy__['rest_sample.service_stop'](name)
def restart(name, sig=None):
    '''
    Restart the specified service with rest_sample
    .. versionadded:: 2015.8.0
    CLI Example:
    .. code-block:: bash
        salt '*' service.restart <service name>
    '''
    return __proxy__['rest_sample.service_restart'](name)
def running(name, sig=None):
    '''
    Return whether this service is running.
    .. versionadded:: 2015.8.0
    '''
    # Bug fix: status() returns a dict only when ``name`` contains globbing;
    # for a plain name it returns a bare bool, and calling .get() on a bool
    # raised AttributeError.
    result = status(name)
    if isinstance(result, dict):
        return result.get(name, False)
    return bool(result)
def enabled(name, sig=None):
    '''
    Only the 'redbull' service is 'enabled' in the test
    .. versionadded:: 2015.8.1
    '''
    # The rest_sample fixture hard-codes exactly one enabled service.
    enabled_services = ('redbull',)
    return name in enabled_services
|
saltstack/salt
|
salt/utils/schedule.py
|
clean_proc_dir
|
python
|
def clean_proc_dir(opts):
    '''
    Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc)
    and remove any that refer to processes that no longer exist

    :param dict opts: Minion options; only ``cachedir`` is read here.
    '''
    for basefilename in os.listdir(salt.minion.get_proc_dir(opts['cachedir'])):
        fn_ = os.path.join(salt.minion.get_proc_dir(opts['cachedir']), basefilename)
        with salt.utils.files.fopen(fn_, 'rb') as fp_:
            job = None
            try:
                # Deserialize the cached job data for this jid file.
                job = salt.payload.Serial(opts).load(fp_)
            except Exception:  # It's corrupted
                # Windows cannot delete an open file
                if salt.utils.platform.is_windows():
                    fp_.close()
                # Remove the unreadable file and move on either way.
                try:
                    os.unlink(fn_)
                    continue
                except OSError:
                    continue
            log.debug(
                'schedule.clean_proc_dir: checking job %s for process '
                'existence', job
            )
            if job is not None and 'pid' in job:
                if salt.utils.process.os_is_running(job['pid']):
                    # Process is alive, keep the file.
                    # NOTE(review): message wording is misleading here — the
                    # file is kept, not cleaned, when the pid still exists.
                    log.debug(
                        'schedule.clean_proc_dir: Cleaning proc dir, pid %s '
                        'still exists.', job['pid']
                    )
                else:
                    # Windows cannot delete an open file
                    if salt.utils.platform.is_windows():
                        fp_.close()
                    # Maybe the file is already gone
                    try:
                        os.unlink(fn_)
                    except OSError:
                        pass
|
Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc)
and remove any that refer to processes that no longer exist
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L1693-L1733
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def get_proc_dir(cachedir, **kwargs):\n '''\n Given the cache directory, return the directory that process data is\n stored in, creating it if it doesn't exist.\n The following optional Keyword Arguments are handled:\n\n mode: which is anything os.makedir would accept as mode.\n\n uid: the uid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n uid. Must be int. Works only on unix/unix like systems.\n\n gid: the gid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n gid. Must be int. Works only on unix/unix like systems.\n '''\n fn_ = os.path.join(cachedir, 'proc')\n mode = kwargs.pop('mode', None)\n\n if mode is None:\n mode = {}\n else:\n mode = {'mode': mode}\n\n if not os.path.isdir(fn_):\n # proc_dir is not present, create it with mode settings\n os.makedirs(fn_, **mode)\n\n d_stat = os.stat(fn_)\n\n # if mode is not an empty dict then we have an explicit\n # dir mode. So lets check if mode needs to be changed.\n if mode:\n mode_part = S_IMODE(d_stat.st_mode)\n if mode_part != mode['mode']:\n os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])\n\n if hasattr(os, 'chown'):\n # only on unix/unix like systems\n uid = kwargs.pop('uid', -1)\n gid = kwargs.pop('gid', -1)\n\n # if uid and gid are both -1 then go ahead with\n # no changes at all\n if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \\\n [i for i in (uid, gid) if i != -1]:\n os.chown(fn_, uid, gid)\n\n return fn_\n",
"def os_is_running(pid):\n '''\n Use OS facilities to determine if a process is running\n '''\n if isinstance(pid, six.string_types):\n pid = int(pid)\n if HAS_PSUTIL:\n return psutil.pid_exists(pid)\n else:\n try:\n os.kill(pid, 0) # SIG 0 is the \"are you alive?\" signal\n return True\n except OSError:\n return False\n",
"def load(self, fn_):\n '''\n Run the correct serialization to load a file\n '''\n data = fn_.read()\n fn_.close()\n if data:\n if six.PY3:\n return self.loads(data, encoding='utf-8')\n else:\n return self.loads(data)\n"
] |
# -*- coding: utf-8 -*-
# See doc/topics/jobs/index.rst
'''
Scheduling routines are located here. To activate the scheduler make the
``schedule`` option available to the master or minion configurations (master
config file or for the minion via config or pillar).
Detailed tutorial about scheduling jobs can be found :ref:`here
<scheduling-jobs>`.
'''
# Import python libs
from __future__ import absolute_import, with_statement, print_function, unicode_literals
import os
import sys
import time
import copy
import signal
import datetime
import itertools
import threading
import logging
import errno
import random
import weakref
# Import Salt libs
import salt.config
import salt.utils.args
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.lazy
import salt.utils.master
import salt.utils.minion
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.user
import salt.utils.yaml
import salt.loader
import salt.minion
import salt.payload
import salt.syspaths
import salt.exceptions
import salt.log.setup as log_setup
import salt.defaults.exitcodes
from salt.utils.odict import OrderedDict
from salt.exceptions import (
SaltInvocationError
)
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import dateutil.parser as dateutil_parser
_WHEN_SUPPORTED = True
_RANGE_SUPPORTED = True
except ImportError:
_WHEN_SUPPORTED = False
_RANGE_SUPPORTED = False
try:
import croniter
_CRON_SUPPORTED = True
except ImportError:
_CRON_SUPPORTED = False
# pylint: enable=import-error
log = logging.getLogger(__name__)
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Enforce the Schedule singleton.

    The first call (or any call with ``new_instance=True``) builds and
    initializes an instance via ``__singleton_init__``; later calls
    return the cached ``cls.instance``.  ``new_instance=True`` returns a
    fresh, un-cached instance without replacing the singleton.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        # NOTE(review): the comment above looks stale -- the instance is kept
        # in the plain class attribute ``cls.instance``, not a weak mapping.
        instance = object.__new__(cls)
        # All real initialization happens here; __init__ is a no-op so the
        # singleton is not re-initialized on every construction.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Caller asked for a private instance: hand it back without
            # touching the shared singleton slot.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally a no-op: __new__ may return the cached singleton, and
    # Python would still invoke __init__ on it.  Real setup lives in
    # __singleton_init__, which runs exactly once per instance.
    pass
# an init for the singleton instance to call
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initializer invoked from ``__new__``.

    :param dict opts: minion/master configuration
    :param functions: loaded execution (or runner) functions
    :param returners: loaded returners, or an object exposing
        ``.loader.gen_functions()``
    :param dict intervals: pre-seeded per-job interval bookkeeping
    :param cleanup: iterable of job-name prefixes to delete on startup
    :param proxy: proxy-minion handle, if any
    :param bool standalone: skip returner/job-cache wiring when True
    :param utils: pre-loaded utils modules (lazily loaded when None)
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Per-eval skip controls; all reset()-able.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either a mapping of returners or a loader wrapper.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
        # Falls back to '0000' when no timezone module is loaded.
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        # clean_proc_dir is a module-level helper (defined elsewhere in
        # this file) that prunes stale job-cache proc entries.
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    '''
    Pickle support: arguments re-passed to ``__new__`` on unpickling.
    The trailing None fills the ``cleanup`` slot.
    '''
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
    '''
    Look up a configuration option.

    Prefers the merged config/pillar view when the ``config.merge``
    execution function is available; otherwise falls back to a plain
    opts lookup (empty dict when absent).
    '''
    if 'config.merge' not in self.functions:
        return self.opts.get(opt, {})
    return self.functions['config.merge'](opt, {}, omit_master=True)
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce a job's ``maxrunning`` limit.

    Counts currently-running jobs with the same schedule name and, when
    the count reaches ``data['maxrunning']``, marks the job skipped by
    setting ``data['run'] = False`` (plus ``_skip_reason`` /
    ``_skipped`` / ``_skipped_time``).  Returns the (possibly mutated)
    ``data`` dict.

    :param str func: function name, used only for debug logging
    :param dict data: the job's schedule data
    :param dict opts: unused here; presumably kept for API symmetry --
        TODO confirm callers do not rely on it
    :param datetime.datetime now: timestamp recorded when skipping
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Masters and minions track running jobs in different places.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Only count entries whose process is actually alive;
                # stale proc files are ignored.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Write the current (non-pillar) schedule to
    <<configdir>>/<<default_include>>/_schedule.conf so it survives
    restarts.  Internal ``_``-prefixed keys are stripped first.
    '''
    # Work out the configuration directory: explicit conf_dir first,
    # then the directory holding conf_file, then the packaged default.
    conf_dir = self.opts.get('conf_dir', None)
    if conf_dir is None and 'conf_file' in self.opts:
        conf_dir = os.path.dirname(self.opts['conf_file'])
    if conf_dir is None:
        conf_dir = salt.syspaths.CONFIG_DIR
    default_include = self.opts.get(
        'default_include',
        salt.config.DEFAULT_MINION_OPTS['default_include'])
    include_dir = os.path.join(conf_dir, os.path.dirname(default_include))
    if not os.path.isdir(include_dir):
        os.makedirs(include_dir)
    conf_path = os.path.join(include_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    to_save = self._get_schedule(include_pillar=False,
                                 remove_hidden=True)
    try:
        with salt.utils.files.fopen(conf_path, 'wb+') as fh_:
            fh_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump({'schedule': to_save})))
    except (IOError, OSError):
        # Best-effort: a failed write must not kill the scheduler.
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove a job from the scheduler.

    Only opts-defined jobs can be deleted; pillar jobs are refused
    with a warning.  A completion event carrying the refreshed
    schedule is always fired.
    '''
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)
    # Notify listeners the deletion (attempt) finished.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # Drop any interval bookkeeping for the job.
    self.intervals.pop(name, None)
    if persist:
        self.persist()
def reset(self):
    '''
    Restore the scheduler to its default, empty state: enabled,
    no splay, no skip controls, and an empty opts schedule.
    '''
    self.enabled = True
    self.splay = None
    self.skip_function = None
    self.skip_during_range = None
    self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Delete every opts-defined job whose name starts with ``name``.
    Matching pillar jobs are refused with a warning.
    '''
    # Snapshot the keys so we can delete while iterating.
    for job in [k for k in self.opts['schedule'] if k.startswith(name)]:
        del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)
    # Notify listeners with the refreshed schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # Drop interval bookkeeping for every matching job.
    for job in [k for k in self.intervals if k.startswith(name)]:
        del self.intervals[job]
    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Add (or update) a single job in the scheduler.

    ``data`` must be a dict with exactly one entry, in the same shape
    as the configuration-file schedule format.  Jobs defined in pillar
    cannot be overridden and are refused with a warning.
    '''
    # We don't do any checking here besides making sure it's a dict;
    # eval() already validates the contents and raises accordingly.
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')
    # A job with no explicit 'enabled' flag is considered enabled.
    for job_name in data:
        data[job_name].setdefault('enabled', True)
    new_job = next(six.iterkeys(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)
    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Notify listeners with the refreshed schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark a single job as enabled.

    Only opts-defined jobs can be toggled; pillar jobs are refused
    with a warning.  A completion event with the refreshed schedule
    is fired either way.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Broadcast the outcome.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark a single job as disabled.

    Only opts-defined jobs can be toggled; pillar jobs are refused
    with a warning.  A completion event with the refreshed schedule
    is fired either way.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Broadcast the outcome.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the definition of job ``name`` with ``schedule``.

    Jobs defined only in pillar are refused with a warning.  Note the
    branch order: a job present in opts is replaced even if a pillar
    job of the same name exists.
    '''
    # ensure job exists, then replace it
    if name in self.opts['schedule']:
        # delete_job also fires a delete event and (when persist=True)
        # writes the schedule file before we re-add the job below.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a scheduled job immediately, outside of its normal schedule.

    :param str name: name of the job as it appears in the schedule

    A job may declare its callable under ``function``, ``func`` or
    ``fun``; any of these may also be a list of function names, each of
    which is run in turn.  Functions that cannot be resolved are logged
    and skipped (previously they were logged but executed anyway,
    which crashed for jobs with no function key at all).
    '''
    data = self._get_schedule().get(name, {})
    # Accept any of the three historical key spellings.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]
    if 'name' not in data:
        data['name'] = name
    for _func in func:
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )
            # Don't attempt to execute a function we can't resolve.
            continue
        log.info('Running Job: %s', name)
        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler as a whole back on and broadcast the result.
    '''
    self.opts['schedule']['enabled'] = True
    # Let listeners know the toggle completed, with the full schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler as a whole off and broadcast the result.
    '''
    self.opts['schedule']['enabled'] = False
    # Let listeners know the toggle completed, with the full schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge a schedule loaded from the saved schedule file into opts.

    Accepts either the bare schedule dict or one wrapped under a
    top-level ``schedule`` key.  All interval bookkeeping is discarded
    and rebuilt on the next eval.
    '''
    self.intervals = {}
    if 'schedule' in schedule:
        schedule = schedule['schedule']
    self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Publish the current schedule over the event bus.

    :param str where: ``'pillar'`` for the pillar-only view,
        ``'opts'`` for the opts-only view, anything else for the
        merged schedule.
    '''
    view_kwargs = {
        'pillar': {'include_opts': False},
        'opts': {'include_pillar': False},
    }
    schedule = self._get_schedule(**view_kwargs.get(where, {}))
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the in-memory schedule to disk and announce completion.
    '''
    self.persist()
    # Tell listeners the save finished.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Move one scheduled run of a job to a different time.

    Records a ``skip_explicit`` entry for the original run time and a
    ``run_explicit`` entry for the replacement time.  Pillar jobs are
    refused with a warning.
    '''
    # Renamed from 'time' to avoid shadowing the time module.
    old_time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', [])
        job['skip_explicit'].append({'time': old_time,
                                     'time_fmt': time_fmt})
        job.setdefault('run_explicit', [])
        job['run_explicit'].append({'time': new_time,
                                    'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Broadcast the outcome with the refreshed schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Tell the scheduler to skip one run of job ``name`` at the time
    carried in ``data``.  Pillar jobs are refused with a warning.
    '''
    # Renamed from 'time' to avoid shadowing the time module.
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', [])
        job['skip_explicit'].append({'time': skip_time,
                                     'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Broadcast the outcome with the refreshed schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Broadcast the next fire time for job ``name`` over the event bus,
    formatted with ``fmt`` (None when the job has never been
    scheduled).
    '''
    fire_time = None
    job_schedule = self._get_schedule()
    if job_schedule:
        fire_time = job_schedule.get(name, {}).get('_next_fire_time', None)
    if fire_time:
        fire_time = fire_time.strftime(fmt)
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the schedule entry for ``name`` (empty dict when absent).
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled job, in this thread or in a spawned process.

    :param bool multiprocessing_enabled: run as a daemonized child
        process (title is renamed, logging reconfigured, and the
        process exits when done)
    :param str func: name of the function to run
    :param dict data: the job's schedule data (name, args, kwargs,
        returner settings, metadata, ...)

    Fix vs. the previous revision: the publish-data packing used
    ``key is not 'kwargs'`` -- identity comparison against a string
    literal, which is implementation-dependent (and a SyntaxWarning on
    modern CPython); it is now ``key != 'kwargs'``.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    # Skeleton of the job return document.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # FIXED: was ``key is not 'kwargs'`` (identity compare
                # against a str literal); use equality instead.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
    '''
    Handle a schedule item defined with time elements
    (seconds, minutes, hours, days).

    Closes over ``now`` and ``self`` from the enclosing eval().
    '''
    if '_seconds' not in data:
        # Collapse all the time elements into one second count, cached
        # on the job under '_seconds' so it is computed only once.
        interval = int(data.get('seconds', 0))
        interval += int(data.get('minutes', 0)) * 60
        interval += int(data.get('hours', 0)) * 3600
        interval += int(data.get('days', 0)) * 86400
        data['_seconds'] = interval
        if not data['_next_fire_time']:
            data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        # Make sure the scheduler wakes up at least this often.
        if interval < self.loop_interval:
            self.loop_interval = interval
    data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
    '''
    Handle a schedule item defined with ``once`` (run exactly one time
    at a fixed timestamp).

    Closes over ``now`` from the enclosing eval().  Sets
    ``data['_continue']`` to tell the caller to skip this job on the
    current pass.
    '''
    if data['_next_fire_time']:
        # Python precedence: this reads as
        #   (fire < now - loop_interval) or (fire > now and not splay)
        # i.e. skip when the one-shot is already past, or still in the
        # future with no splay pending.
        if data['_next_fire_time'] < now - loop_interval or \
                data['_next_fire_time'] > now and \
                not data['_splay']:
            data['_continue'] = True
    if not data['_next_fire_time'] and \
            not data['_splay']:
        once = data['once']
        if not isinstance(once, datetime.datetime):
            # Parse string timestamps with the job's format (ISO-like
            # by default).
            once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
            try:
                once = datetime.datetime.strptime(data['once'],
                                                  once_fmt)
            except (TypeError, ValueError):
                data['_error'] = ('Date string could not '
                                  'be parsed: {0}, {1}. '
                                  'Ignoring job {2}.'.format(
                                      data['once'],
                                      once_fmt,
                                      data['name']))
                log.error(data['_error'])
                return
        data['_next_fire_time'] = once
        data['_next_scheduled_fire_time'] = once
        # If _next_fire_time is less than now, continue
        if once < now - loop_interval:
            data['_continue'] = True
def _handle_when(data, loop_interval):
    '''
    Handle a schedule item defined with ``when`` (one or more absolute
    timestamps, or names resolved through pillar/grains ``whens``
    lookup tables).

    Closes over ``now`` and ``self`` from the enclosing eval().
    Requires python-dateutil for string parsing.
    '''
    if not _WHEN_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return
    # Normalize to a list of "when" specs.
    if not isinstance(data['when'], list):
        _when_data = [data['when']]
    else:
        _when_data = data['when']
    _when = []
    for i in _when_data:
        # A spec may be an alias into pillar['whens'] or grains['whens'].
        if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                i in self.opts['pillar']['whens']):
            if not isinstance(self.opts['pillar']['whens'],
                              dict):
                data['_error'] = ('Pillar item "whens" '
                                  'must be a dict. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
            when_ = self.opts['pillar']['whens'][i]
        elif ('whens' in self.opts['grains'] and
                i in self.opts['grains']['whens']):
            if not isinstance(self.opts['grains']['whens'],
                              dict):
                data['_error'] = ('Grain "whens" must be a dict. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
            when_ = self.opts['grains']['whens'][i]
        else:
            when_ = i
        if not isinstance(when_, datetime.datetime):
            try:
                when_ = dateutil_parser.parse(when_)
            except ValueError:
                data['_error'] = ('Invalid date string {0}. '
                                  'Ignoring job {1}.'.format(i, data['name']))
                log.error(data['_error'])
                return
        _when.append(when_)
    if data['_splay']:
        _when.append(data['_splay'])
    # Sort the list of "whens" from earlier to later schedules
    _when.sort()
    # Copy the list so we can loop through it
    for i in copy.deepcopy(_when):
        if len(_when) > 1:
            if i < now - loop_interval:
                # Remove all missed schedules except the latest one.
                # We need it to detect if it was triggered previously.
                _when.remove(i)
    if _when:
        # Grab the first element, which is the next run time or
        # last scheduled time in the past.
        when = _when[0]
        if when < now - loop_interval and \
                not data.get('_run', False) and \
                not data.get('run', False) and \
                not data['_splay']:
            data['_next_fire_time'] = None
            data['_continue'] = True
            return
        if '_run' not in data:
            # Prevent run of jobs from the past
            data['_run'] = bool(when >= now - loop_interval)
        if not data['_next_fire_time']:
            data['_next_fire_time'] = when
        data['_next_scheduled_fire_time'] = when
        # NOTE(review): ``run`` below is a free variable resolved from
        # the enclosing eval() scope at call time -- confirm eval()
        # assigns it before invoking this handler.
        if data['_next_fire_time'] < when and \
                not run and \
                not data['_run']:
            data['_next_fire_time'] = when
            data['_run'] = True
        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True
def _handle_cron(data, loop_interval):
    '''
    Handle a schedule item defined with a ``cron`` expression.

    Closes over ``now`` and ``self`` from the enclosing eval().
    Requires the croniter package.
    '''
    if not _CRON_SUPPORTED:
        data['_error'] = ('Missing python-croniter. '
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return
    if data['_next_fire_time'] is None:
        # Get next time frame for a "cron" job if it has been never
        # executed before or already executed in the past.
        try:
            data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
        except (ValueError, KeyError):
            data['_error'] = ('Invalid cron string. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
    # If next job run is scheduled more than 1 minute ahead and
    # configured loop interval is longer than that, we should
    # shorten it to get our job executed closer to the beginning
    # of desired time.
    # NOTE(review): ``now - data['_next_fire_time']`` is negative for a
    # future fire time, so ``interval >= 60`` never triggers then --
    # this looks inverted relative to the comment above; confirm
    # whether it should be ``data['_next_fire_time'] - now``.
    interval = (now - data['_next_fire_time']).total_seconds()
    if interval >= 60 and interval < self.loop_interval:
        self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
    '''
    Handle a schedule item with ``run_explicit`` -- explicit one-off
    run times (datetimes, or dicts with 'time'/'time_fmt').

    Closes over ``now`` from the enclosing eval().  Sets
    ``data['run']`` True only when the earliest remaining run time
    falls inside the current loop window.
    '''
    _run_explicit = []
    for _run_time in data['run_explicit']:
        if isinstance(_run_time, datetime.datetime):
            _run_explicit.append(_run_time)
        else:
            _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                            _run_time['time_fmt']))
    data['run'] = False
    # Copy the list so we can loop through it
    for i in copy.deepcopy(_run_explicit):
        if len(_run_explicit) > 1:
            # Prune stale entries, but always keep at least one.
            if i < now - loop_interval:
                _run_explicit.remove(i)
    if _run_explicit:
        # Fire only when 'now' is inside [t, t + loop_interval).
        if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
            data['run'] = True
            data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
    '''
    Handle a schedule item with ``skip_explicit`` -- explicit times at
    which one run should be skipped (datetimes, or dicts with
    'time'/'time_fmt').

    Closes over ``now`` and ``self`` from the enclosing eval().  When
    a skip window matches and ``self.skip_function`` is set, the
    configured skip function is run instead of the job.
    '''
    data['run'] = False
    _skip_explicit = []
    for _skip_time in data['skip_explicit']:
        if isinstance(_skip_time, datetime.datetime):
            _skip_explicit.append(_skip_time)
        else:
            _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                             _skip_time['time_fmt']))
    # Copy the list so we can loop through it
    for i in copy.deepcopy(_skip_explicit):
        # Drop skip times that are already in the past.
        if i < now - loop_interval:
            _skip_explicit.remove(i)
    if _skip_explicit:
        if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
            if self.skip_function:
                # Substitute the skip function for this run.
                data['run'] = True
                data['func'] = self.skip_function
            else:
                data['_skip_reason'] = 'skip_explicit'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
        else:
            data['run'] = True
        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle schedule item with skip_during_range.

            ``skip_during_range`` must be a dict with ``start`` and ``end``
            values (datetimes or dateutil-parseable strings).  While ``now``
            is inside the range the job is skipped (or the skip_function is
            run instead).  With ``run_after_skip_range`` set, a run_explicit
            entry is added so the job fires right after the range ends.
            Closes over ``now`` and ``self`` from the enclosing scope.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
                    data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    # Inside the skip range: substitute the skip_function if
                    # one is configured, else record the skip.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_range(data):
            '''
            Handle schedule item with range.

            ``range`` must be a dict with ``start`` and ``end`` values
            (datetimes or dateutil-parseable strings).  The job runs while
            ``now`` is inside the range; with ``invert`` set, it runs only
            outside it.  Closes over ``now`` and ``self``.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only outside [start, end].
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    # Normal range: run only inside [start, end]; outside it
                    # either run the configured skip_function or skip.
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        if self.skip_function:
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_after(data):
            '''
            Handle schedule item with after.

            The job may only run once the ``after`` time (a datetime or a
            dateutil-parseable string) has passed.  Closes over ``now``.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            after = data['after']
            if not isinstance(after, datetime.datetime):
                after = dateutil_parser.parse(after)

            if after >= now:
                # 'after' still lies in the future: record the skip.
                log.debug(
                    'After time has not passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'after_not_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
        def _handle_until(data):
            '''
            Handle schedule item with until.

            The job may only run while the ``until`` time (a datetime or a
            dateutil-parseable string) is still in the future.  Closes over
            ``now``.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            until = data['until']
            if not isinstance(until, datetime.datetime):
                until = dateutil_parser.parse(until)

            if until <= now:
                # 'until' has already passed: record the skip.
                log.debug(
                    'Until time has passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'until_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
        def _chop_ms(dt):
            '''
            Remove the microseconds from a datetime object.

            Used so fire-time comparisons work at whole-second resolution.
            '''
            return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Launch a scheduled job.

        ``func`` is a list of function names (one process/thread is started
        per entry); ``data`` is the job's schedule dict.  Honors the job's
        ``dry_run`` flag and the ``multiprocessing`` /
        ``run_schedule_jobs_in_background`` opts.  When ``data['args']`` is a
        list, the i-th entry is given to the i-th function.
        '''
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            for i, _func in enumerate(func):
                # Each function gets its own copy of the job data so
                # per-function mutations (like 'args') don't leak.
                _data = copy.deepcopy(data)
                if 'args' in _data and isinstance(_data['args'], list):
                    _data['args'] = _data['args'][i]
                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.option
|
python
|
def option(self, opt):
    '''
    Return options merged from config and pillar.

    When the ``config.merge`` execution module is available it is used so
    pillar values are taken into account; otherwise fall back to a plain
    lookup in ``self.opts`` (defaulting to an empty dict).
    '''
    if 'config.merge' not in self.functions:
        return self.opts.get(opt, {})
    return self.functions['config.merge'](opt, {}, omit_master=True)
|
Return options merged from config and pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L165-L171
| null |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule.

        The class is a singleton: the first call builds and caches the
        instance on ``cls.instance``; later calls return the cached one.
        Passing ``new_instance=True`` builds a fresh, uncached instance.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            # Real initialization happens here, not in __init__ (which is a
            # no-op so repeated Schedule(...) calls don't reset state).
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        '''
        Intentionally a no-op: real initialization is done once in
        ``__singleton_init__`` (invoked from ``__new__``), while ``__init__``
        runs on every ``Schedule(...)`` call and must not reset the shared
        singleton state.
        '''
        pass
# an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        One-time initializer for the singleton, called from ``__new__``.

        Stores opts/functions/returners/utils, resets the global schedule
        flags, and (when not standalone) cleans the proc dir and deletes any
        job name prefixes listed in ``cleanup``.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        # Global schedule overrides; may be replaced from schedule data later.
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept either a dict-like returners mapping or a loader
            # object exposing ``loader.gen_functions()``.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        # Falls back to '0000' when the timezone module is unavailable.
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
    def __getnewargs__(self):
        '''
        Pickling support: supply the positional arguments that ``__new__``
        will be called with when the instance is unpickled.
        '''
        return self.opts, self.functions, self.returners, self.intervals, None
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce the job's ``maxrunning`` limit.

        Counts currently-running jobs scheduled under the same name and, when
        the count reaches ``data['maxrunning']``, marks the job as skipped
        (``run`` set to False, skip bookkeeping recorded).  Returns the
        (possibly modified) ``data`` dict.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True

        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            # Running-job discovery differs per role.
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # Only count jobs with the same schedule name whose PID
                    # is still alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf.

        Only the opts-defined portion of the schedule is written (pillar jobs
        are excluded, and internal underscore-prefixed keys are stripped).
        Write failures are logged, not raised.
        '''
        # Resolve the config dir: explicit conf_dir, the directory of
        # conf_file, or the system default, in that order.
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR

        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))

        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)

        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            # Serialize as YAML under a top-level 'schedule' key.
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignore jobs from pillar
'''
# ensure job exists, then delete it
if name in self.opts['schedule']:
del self.opts['schedule'][name]
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot delete job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
if name in self.intervals:
del self.intervals[name]
if persist:
self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then delete it
for job in list(self.opts['schedule'].keys()):
if job.startswith(name):
del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
if job.startswith(name):
log.warning("Cannot delete job %s, it's in the pillar!", job)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
for job in list(self.intervals.keys()):
if job.startswith(name):
del self.intervals[job]
if persist:
self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
# if enabled is not included in the job,
# assume job is enabled.
for job in data:
if 'enabled' not in data[job]:
data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
log.warning("Cannot update job %s, it's in the pillar!", new_job)
elif new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled job: %s', new_job)
self.opts['schedule'].update(data)
else:
log.info('Added new job %s to scheduler', new_job)
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True):
'''
Enable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then enable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = True
log.info('Enabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_job_complete')
if persist:
self.persist()
def disable_job(self, name, persist=True):
'''
Disable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then disable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = False
log.info('Disabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_job_complete')
if persist:
self.persist()
def modify_job(self, name, schedule, persist=True):
'''
Modify a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then replace it
if name in self.opts['schedule']:
self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
return
self.opts['schedule'][name] = schedule
if persist:
self.persist()
def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data)
def enable_schedule(self):
'''
Enable the scheduler.
'''
self.opts['schedule']['enabled'] = True
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
'''
Disable the scheduler.
'''
self.opts['schedule']['enabled'] = False
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
'''
Save the current schedule
'''
self.persist()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True},
tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
'''
Postpone a job in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
new_time = data['new_time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
if 'run_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['run_explicit'] = []
self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
'''
Skip a job at a specific time in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
'''
Return the next fire time for the specified job
'''
schedule = self._get_schedule()
_next_fire_time = None
if schedule:
_next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
if _next_fire_time:
_next_fire_time = _next_fire_time.strftime(fmt)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
'''
Execute this method in a multiprocess or thread
'''
if salt.utils.platform.is_windows() \
or self.opts.get('transport') == 'zeromq':
# Since function references can't be pickled and pickling
# is required when spawning new processes on Windows, regenerate
# the functions and returners.
# This also needed for ZeroMQ transport to reset all functions
# context data that could keep paretns connections. ZeroMQ will
# hang on polling parents connections from the child process.
if self.opts['__role'] == 'master':
self.functions = salt.loader.runner(self.opts, utils=self.utils)
else:
self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
ret = {'id': self.opts.get('id', 'master'),
'fun': func,
'fun_args': [],
'schedule': data['name'],
'jid': salt.utils.jid.gen_jid(self.opts)}
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
ret['metadata']['_TOS'] = self.time_offset
ret['metadata']['_TS'] = time.ctime()
ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
else:
log.warning('schedule: The metadata parameter must be '
'specified as a dictionary. Ignoring.')
if multiprocessing_enabled:
# We just want to modify the process name if we're on a different process
salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
data_returner = data.get('returner', None)
if not self.standalone:
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# Reconfigure multiprocessing logging after daemonizing
log_setup.setup_multiprocessing_logging()
if multiprocessing_enabled:
# Don't *BEFORE* to go into try to don't let it triple execute the finally section.
salt.utils.process.daemonize_if(self.opts)
# TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
try:
minion_blackout_violation = False
if self.opts.get('pillar', {}).get('minion_blackout', False):
whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
elif self.opts.get('grains', {}).get('minion_blackout', False):
whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
ret['pid'] = os.getpid()
if not self.standalone:
if 'jid_include' not in data or data['jid_include']:
log.debug(
'schedule.handle_func: adding this job to the '
'jobcache with data %s', ret
)
# write this to /var/cache/salt/minion/proc
with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
fp_.write(salt.payload.Serial(self.opts).dumps(ret))
args = tuple()
if 'args' in data:
args = data['args']
ret['fun_args'].extend(data['args'])
kwargs = {}
if 'kwargs' in data:
kwargs = data['kwargs']
ret['fun_args'].append(copy.deepcopy(kwargs))
if func not in self.functions:
ret['return'] = self.functions.missing_fun_string(func)
salt.utils.error.raise_error(
message=self.functions.missing_fun_string(func))
# if the func support **kwargs, lets pack in the pub data we have
# TODO: pack the *same* pub data as a minion?
argspec = salt.utils.args.get_function_argspec(self.functions[func])
if argspec.keywords:
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(ret):
if key is not 'kwargs':
kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
# Only include these when running runner modules
if self.opts['__role'] == 'master':
jid = salt.utils.jid.gen_jid(self.opts)
tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
event = salt.utils.event.get_event(
self.opts['__role'],
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=None
)
func_globals = {
'__jid__': jid,
'__user__': salt.utils.user.get_user(),
'__tag__': tag,
'__jid_event__': weakref.proxy(namespaced_event),
}
self_functions = copy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, func)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
self.functions.pack['__context__']['retcode'] = 0
ret['return'] = self.functions[func](*args, **kwargs)
if not self.standalone:
# runners do not provide retcode
if 'retcode' in self.functions.pack['__context__']:
ret['retcode'] = self.functions.pack['__context__']['retcode']
ret['success'] = True
if data_returner or self.schedule_returner:
if 'return_config' in data:
ret['ret_config'] = data['return_config']
if 'return_kwargs' in data:
ret['ret_kwargs'] = data['return_kwargs']
rets = []
for returner in [data_returner, self.schedule_returner]:
if isinstance(returner, six.string_types):
rets.append(returner)
elif isinstance(returner, list):
rets.extend(returner)
# simple de-duplication with order retained
for returner in OrderedDict.fromkeys(rets):
ret_str = '{0}.returner'.format(returner)
if ret_str in self.returners:
self.returners[ret_str](ret)
else:
log.info(
'Job %s using invalid returner: %s. Ignoring.',
func, returner
)
except Exception:
log.exception('Unhandled exception running %s', ret['fun'])
# Although catch-all exception handlers are bad, the exception here
# is to let the exception bubble up to the top of the thread context,
# where the thread will die silently, which is worse.
if 'return' not in ret:
ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
ret['success'] = False
ret['retcode'] = 254
finally:
# Only attempt to return data to the master if the scheduled job is running
# on a master itself or a minion.
if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
# The 'return_job' option is enabled by default even if not set
if 'return_job' in data and not data['return_job']:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
# No returners defined, so we're only sending back to the master
if not data_returner and not self.schedule_returner:
mret['jid'] = 'req'
if data.get('return_job') == 'nocache':
# overwrite 'req' to signal to master that
# this job shouldn't be stored
mret['jid'] = 'nocache'
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
if '__role' in self.opts and self.opts['__role'] == 'minion':
event = salt.utils.event.get_event('minion',
opts=self.opts,
listen=False)
elif '__role' in self.opts and self.opts['__role'] == 'master':
event = salt.utils.event.get_master_event(self.opts,
self.opts['sock_dir'])
try:
event.fire_event(load, '__schedule_return')
except Exception as exc:
log.exception('Unhandled exception firing __schedule_return event')
if not self.standalone:
log.debug('schedule.handle_func: Removing %s', proc_fn)
try:
os.unlink(proc_fn)
except OSError as exc:
if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
# EEXIST and ENOENT are OK because the file is gone and that's what
# we wanted
pass
else:
log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
# Otherwise, failing to delete this file is not something
# we can cleanly handle.
raise
finally:
if multiprocessing_enabled:
# Let's make sure we exit the process!
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    def eval(self, now=None):
        '''
        Evaluate and execute the schedule

        :param datetime now: Override current time with a datetime object
            instance
        '''
        log.trace('==== evaluating schedule now %s =====', now)

        loop_interval = self.opts['loop_interval']
        if not isinstance(loop_interval, datetime.timedelta):
            # Normalize a plain number of seconds to a timedelta so all
            # time comparisons below are datetime arithmetic.
            loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
        def _handle_time_elements(data):
            '''
            Handle schedule item with time elements
            seconds, minutes, hours, days

            Computes and caches the total interval in data['_seconds'] on
            first sight, seeds '_next_fire_time' relative to the
            closed-over ``now``, and always refreshes
            '_next_scheduled_fire_time'.
            '''
            if '_seconds' not in data:
                interval = int(data.get('seconds', 0))
                interval += int(data.get('minutes', 0)) * 60
                interval += int(data.get('hours', 0)) * 3600
                interval += int(data.get('days', 0)) * 86400

                data['_seconds'] = interval

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

                # Shrink the scheduler's polling interval so jobs with a
                # shorter period than the loop are not missed.
                if interval < self.loop_interval:
                    self.loop_interval = interval
            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        def _handle_once(data, loop_interval):
            '''
            Handle schedule item with once
            '''
            if data['_next_fire_time']:
                # NOTE(review): by operator precedence this condition groups
                # as (next < now - loop_interval) OR
                # (next > now AND no splay) -- confirm that grouping is the
                # intended "already missed or still in the future" check.
                if data['_next_fire_time'] < now - loop_interval or \
                   data['_next_fire_time'] > now and \
                   not data['_splay']:
                    data['_continue'] = True

            if not data['_next_fire_time'] and \
               not data['_splay']:
                once = data['once']
                if not isinstance(once, datetime.datetime):
                    # Strings are parsed with an optional custom format.
                    once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                    try:
                        once = datetime.datetime.strptime(data['once'],
                                                          once_fmt)
                    except (TypeError, ValueError):
                        data['_error'] = ('Date string could not '
                                          'be parsed: {0}, {1}. '
                                          'Ignoring job {2}.'.format(
                                              data['once'],
                                              once_fmt,
                                              data['name']))
                        log.error(data['_error'])
                        return
                data['_next_fire_time'] = once
                data['_next_scheduled_fire_time'] = once
                # If _next_fire_time is less than now, continue
                if once < now - loop_interval:
                    data['_continue'] = True
        def _handle_when(data, loop_interval):
            '''
            Handle schedule item with when

            'when' may be a single value or a list; each entry can be a
            datetime, a parseable date string, or a key into a "whens"
            lookup table from pillar or grains.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['when'], list):
                _when_data = [data['when']]
            else:
                _when_data = data['when']

            _when = []
            for i in _when_data:
                # Resolve named times through pillar, then grains, before
                # falling back to treating the value as a literal time.
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        i in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'],
                                      dict):
                        data['_error'] = ('Pillar item "whens" '
                                          'must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['pillar']['whens'][i]
                elif ('whens' in self.opts['grains'] and
                      i in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'],
                                      dict):
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['grains']['whens'][i]
                else:
                    when_ = i

                if not isinstance(when_, datetime.datetime):
                    try:
                        when_ = dateutil_parser.parse(when_)
                    except ValueError:
                        data['_error'] = ('Invalid date string {0}. '
                                          'Ignoring job {1}.'.format(i, data['name']))
                        log.error(data['_error'])
                        return

                _when.append(when_)

            if data['_splay']:
                _when.append(data['_splay'])

            # Sort the list of "whens" from earlier to later schedules
            _when.sort()

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_when):
                if len(_when) > 1:
                    if i < now - loop_interval:
                        # Remove all missed schedules except the latest one.
                        # We need it to detect if it was triggered previously.
                        _when.remove(i)

            if _when:
                # Grab the first element, which is the next run time or
                # last scheduled time in the past.
                when = _when[0]

                if when < now - loop_interval and \
                        not data.get('_run', False) and \
                        not data.get('run', False) and \
                        not data['_splay']:
                    data['_next_fire_time'] = None
                    data['_continue'] = True
                    return

                if '_run' not in data:
                    # Prevent run of jobs from the past
                    data['_run'] = bool(when >= now - loop_interval)

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = when
                    data['_next_scheduled_fire_time'] = when

                # NOTE: ``run`` below is the flag from the enclosing eval()
                # scope (a closure read), not a local of this helper.
                if data['_next_fire_time'] < when and \
                        not run and \
                        not data['_run']:
                    data['_next_fire_time'] = when
                    data['_run'] = True
            elif not data.get('_run', False):
                data['_next_fire_time'] = None
                data['_continue'] = True
        def _handle_cron(data, loop_interval):
            '''
            Handle schedule item with cron
            '''
            if not _CRON_SUPPORTED:
                data['_error'] = ('Missing python-croniter. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if data['_next_fire_time'] is None:
                # Get next time frame for a "cron" job if it has been never
                # executed before or already executed in the past.
                try:
                    data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                    data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                except (ValueError, KeyError):
                    data['_error'] = ('Invalid cron string. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

                # If next job run is scheduled more than 1 minute ahead and
                # configured loop interval is longer than that, we should
                # shorten it to get our job executed closer to the beginning
                # of desired time.
                # NOTE(review): ``now - _next_fire_time`` is negative when the
                # fire time is in the future, so ``interval >= 60`` appears
                # unreachable for the case the comment above describes;
                # verify whether the operands should be swapped.
                interval = (now - data['_next_fire_time']).total_seconds()
                if interval >= 60 and interval < self.loop_interval:
                    self.loop_interval = interval
        def _handle_run_explicit(data, loop_interval):
            '''
            Handle schedule item with run_explicit

            Entries may be datetime objects or {'time': ..., 'time_fmt': ...}
            dicts; sets data['run'] when the earliest remaining entry falls
            inside the current loop window.
            '''
            _run_explicit = []
            for _run_time in data['run_explicit']:
                if isinstance(_run_time, datetime.datetime):
                    _run_explicit.append(_run_time)
                else:
                    _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                    _run_time['time_fmt']))

            data['run'] = False

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_run_explicit):
                if len(_run_explicit) > 1:
                    # Drop stale entries, but always keep at least one.
                    if i < now - loop_interval:
                        _run_explicit.remove(i)

            if _run_explicit:
                if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                    data['run'] = True
                    data['_next_fire_time'] = _run_explicit[0]
        def _handle_skip_explicit(data, loop_interval):
            '''
            Handle schedule item with skip_explicit
            '''
            data['run'] = False

            _skip_explicit = []
            for _skip_time in data['skip_explicit']:
                if isinstance(_skip_time, datetime.datetime):
                    _skip_explicit.append(_skip_time)
                else:
                    _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                     _skip_time['time_fmt']))

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_skip_explicit):
                # Drop skip entries that are already in the past.
                if i < now - loop_interval:
                    _skip_explicit.remove(i)

            if _skip_explicit:
                if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                    # Inside the skip window: run the configured skip
                    # function instead, or record why the job was skipped.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'skip_explicit'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle schedule item with skip_during_range
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
               data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    # Inside the window: either substitute the skip function
                    # or mark the job as skipped.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_range(data):
            '''
            Handle schedule item with range

            Restricts the run to the [start, end] window (or its complement
            when 'invert' is set).
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only OUTSIDE the window.
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        if self.skip_function:
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_after(data):
            '''
            Handle schedule item with after

            The job only runs once the 'after' timestamp (datetime or
            parseable string) has passed relative to the closed-over ``now``.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            after = data['after']
            if not isinstance(after, datetime.datetime):
                after = dateutil_parser.parse(after)

            if after >= now:
                log.debug(
                    'After time has not passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'after_not_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
        def _handle_until(data):
            '''
            Handle schedule item with until

            The job stops running once the 'until' timestamp (datetime or
            parseable string) has passed relative to the closed-over ``now``.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            until = data['until']
            if not isinstance(until, datetime.datetime):
                until = dateutil_parser.parse(until)

            if until <= now:
                log.debug(
                    'Until time has passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'until_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
        # Pull the merged (opts + pillar) schedule and lift the handful of
        # global settings out of it before iterating the per-job items.
        schedule = self._get_schedule()
        if not isinstance(schedule, dict):
            raise ValueError('Schedule must be of type dict.')

        if 'skip_function' in schedule:
            self.skip_function = schedule['skip_function']
        if 'skip_during_range' in schedule:
            self.skip_during_range = schedule['skip_during_range']
        if 'enabled' in schedule:
            self.enabled = schedule['enabled']
        if 'splay' in schedule:
            self.splay = schedule['splay']

        _hidden = ['enabled',
                   'skip_function',
                   'skip_during_range',
                   'splay']
        for job, data in six.iteritems(schedule):

            # Skip anything that is a global setting
            if job in _hidden:
                continue

            # Clear these out between runs
            for item in ['_continue',
                         '_error',
                         '_enabled',
                         '_skipped',
                         '_skip_reason',
                         '_skipped_time']:
                if item in data:
                    del data[item]
            run = False

            # NOTE(review): ``data`` is indexed/assigned here before the
            # isinstance check below, so a non-dict item would raise rather
            # than reach the error log -- confirm the intended ordering.
            if 'name' in data:
                job_name = data['name']
            else:
                job_name = data['name'] = job

            if not isinstance(data, dict):
                log.error(
                    'Scheduled job "%s" should have a dict value, not %s',
                    job_name, type(data)
                )
                continue

            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if not isinstance(func, list):
                func = [func]
            for _func in func:
                if _func not in self.functions:
                    log.info(
                        'Invalid function: %s in scheduled job %s.',
                        _func, job_name
                    )

            if '_next_fire_time' not in data:
                data['_next_fire_time'] = None

            if '_splay' not in data:
                data['_splay'] = None

            if 'run_on_start' in data and \
                    data['run_on_start'] and \
                    '_run_on_start' not in data:
                data['_run_on_start'] = True

            if not now:
                now = datetime.datetime.now()

            # Used for quick lookups when detecting invalid option
            # combinations.
            schedule_keys = set(data.keys())

            time_elements = ('seconds', 'minutes', 'hours', 'days')
            scheduling_elements = ('when', 'cron', 'once')

            invalid_sched_combos = [
                set(i) for i in itertools.combinations(scheduling_elements, 2)
            ]

            if any(i <= schedule_keys for i in invalid_sched_combos):
                log.error(
                    'Unable to use "%s" options together. Ignoring.',
                    '", "'.join(scheduling_elements)
                )
                continue

            invalid_time_combos = []
            for item in scheduling_elements:
                all_items = itertools.chain([item], time_elements)
                invalid_time_combos.append(
                    set(itertools.combinations(all_items, 2)))

            if any(set(x) <= schedule_keys for x in invalid_time_combos):
                log.error(
                    'Unable to use "%s" with "%s" options. Ignoring',
                    '", "'.join(time_elements),
                    '", "'.join(scheduling_elements)
                )
                continue

            if 'run_explicit' in data:
                _handle_run_explicit(data, loop_interval)
                run = data['run']

            # Dispatch to the matching scheduling-style handler; True when
            # any of seconds/minutes/hours/days is present.
            if True in [True for item in time_elements if item in data]:
                _handle_time_elements(data)
            elif 'once' in data:
                _handle_once(data, loop_interval)
            elif 'when' in data:
                _handle_when(data, loop_interval)
            elif 'cron' in data:
                _handle_cron(data, loop_interval)
            else:
                continue

            # Something told us to continue, so we continue
            if '_continue' in data and data['_continue']:
                continue

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            # Whole-second distance to the next fire time (negative = missed).
            seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

            # If there is no job specific splay available,
            # grab the global which defaults to None.
            if 'splay' not in data:
                data['splay'] = self.splay

            if 'splay' in data and data['splay']:
                # Got "splay" configured, make decision to run a job based on that
                if not data['_splay']:
                    # Try to add "splay" time only if next job fire time is
                    # still in the future. We should trigger job run
                    # immediately otherwise.
                    splay = _splay(data['splay'])
                    if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                        log.debug('schedule.handle_func: Adding splay of '
                                  '%s seconds to next run.', splay)
                        data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                        if 'when' in data:
                            data['_run'] = True
                    else:
                        run = True

                if data['_splay']:
                    # The "splay" configuration has been already processed, just use it
                    seconds = (data['_splay'] - now).total_seconds()
                    if 'when' in data:
                        data['_next_fire_time'] = data['_splay']

            # Decide whether this evaluation pass should trigger the job.
            if '_seconds' in data:
                if seconds <= 0:
                    run = True
            elif 'when' in data and data['_run']:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    data['_run'] = False
                    run = True
            elif 'cron' in data:
                # Reset next scheduled time because it is in the past now,
                # and we should trigger the job run, then wait for the next one.
                if seconds <= 0:
                    data['_next_fire_time'] = None
                    run = True
            elif 'once' in data:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    run = True
            elif seconds == 0:
                run = True

            if '_run_on_start' in data and data['_run_on_start']:
                run = True
                data['_run_on_start'] = False
            elif run:
                # The run decision can still be vetoed (or redirected to the
                # skip function) by the window/skip options below.
                if 'range' in data:
                    _handle_range(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                # If there is no job specific skip_during_range available,
                # grab the global which defaults to None.
                if 'skip_during_range' not in data and self.skip_during_range:
                    data['skip_during_range'] = self.skip_during_range

                if 'skip_during_range' in data and data['skip_during_range']:
                    _handle_skip_during_range(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'skip_explicit' in data:
                    _handle_skip_explicit(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'until' in data:
                    _handle_until(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                if 'after' in data:
                    _handle_after(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

            # If args is a list and less than the number of functions
            # run is set to False.
            if 'args' in data and isinstance(data['args'], list):
                if len(data['args']) < len(func):
                    data['_error'] = ('Number of arguments is less than '
                                      'the number of functions. Ignoring job.')
                    log.error(data['_error'])
                    run = False

            # If the job item has continue, then we set run to False
            # so the job does not run but we still get the important
            # information calculated, eg. _next_fire_time
            if '_continue' in data and data['_continue']:
                run = False

            # If there is no job specific enabled available,
            # grab the global which defaults to True.
            if 'enabled' not in data:
                data['enabled'] = self.enabled

            # If globally disabled, disable the job
            if not self.enabled:
                data['enabled'] = self.enabled
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            # Job is disabled, set run to False
            if 'enabled' in data and not data['enabled']:
                data['_enabled'] = False
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            miss_msg = ''
            if seconds < 0:
                miss_msg = ' (runtime missed ' \
                           'by {0} seconds)'.format(abs(seconds))

            try:
                if run:
                    # Job is disabled, continue
                    if 'enabled' in data and not data['enabled']:
                        log.debug('Job: %s is disabled', job_name)
                        data['_skip_reason'] = 'disabled'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        continue

                    if 'jid_include' not in data or data['jid_include']:
                        data['jid_include'] = True
                        log.debug('schedule: Job %s was scheduled with jid_include, '
                                  'adding to cache (jid_include defaults to True)',
                                  job_name)
                        if 'maxrunning' in data:
                            log.debug('schedule: Job %s was scheduled with a max '
                                      'number of %s', job_name, data['maxrunning'])
                        else:
                            log.info('schedule: maxrunning parameter was not specified for '
                                     'job %s, defaulting to 1.', job_name)
                            data['maxrunning'] = 1

                    if not self.standalone:
                        data['run'] = run
                        data = self._check_max_running(func,
                                                       data,
                                                       self.opts,
                                                       now)
                        run = data['run']

                    # Check run again, just in case _check_max_running
                    # set run to False
                    if run:
                        log.info('Running scheduled job: %s%s', job_name, miss_msg)
                        self._run_job(func, data)
            finally:
                # Only set _last_run if the job ran
                if run:
                    data['_last_run'] = now
                data['_splay'] = None

            # Re-arm interval-style jobs for their next fire time.
            if '_seconds' in data:
                if self.standalone:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif '_skipped' in data and data['_skipped']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif run:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Launch the job(s) for a schedule item, honoring dry_run and the
        multiprocessing / background-execution options.

        :param list func: function names to execute; one process or thread
            is started per entry.
        :param dict data: the schedule item; each worker gets a deep copy,
            with per-function args selected by index.
        '''
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            for i, _func in enumerate(func):
                _data = copy.deepcopy(data)
                if 'args' in _data and isinstance(_data['args'], list):
                    # Each function receives the positionally matching args entry.
                    _data['args'] = _data['args'][i]
                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule._get_schedule
|
python
|
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
|
Return the schedule data structure
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L173-L199
| null |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule

        Initialization is delegated to __singleton_init__ so the (always
        re-invoked) __init__ can stay a no-op. Passing new_instance=True
        yields a private instance without touching the shared singleton.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                # Private instance requested: do not overwrite the singleton.
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        # Intentionally a no-op: all real initialization happens exactly
        # once in __singleton_init__, invoked from __new__.
        pass
    # an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        One-time initialization of the singleton instance (called from
        __new__ rather than __init__, which must stay empty).
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        # Lazily build the utils loader when the caller did not supply one.
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept anything dict-like directly; otherwise assume a loader
            # wrapper and materialize its functions.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        # '0000' fallback when the timezone module is unavailable.
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
    def __getnewargs__(self):
        '''
        Arguments passed to ``__new__`` when unpickling (pickle
        protocol 2+), so the singleton machinery is re-entered with the
        original opts/functions.
        '''
        return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
    def _check_max_running(self, func, data, opts, now):
        '''
        Check whether the job described by ``data`` may start now,
        honoring its ``maxrunning`` limit, and return ``data`` with
        ``run`` (plus skip bookkeeping) updated.

        NOTE(review): the ``opts`` parameter is not used in this body
        (self.opts is consulted instead) — confirm against callers.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True

        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # A job counts against the limit only if it has the
                    # same schedule name and its process is still alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

        Pillar-sourced jobs and hidden bookkeeping keys (leading
        underscore) are excluded from the written file. Write failures
        are logged, not raised.
        '''
        # Resolve the config dir: explicit conf_dir, then the directory
        # of conf_file, then the packaged default.
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR

        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))

        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)

        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
    def delete_job(self, name, persist=True):
        '''
        Deletes a job from the scheduler. Ignore jobs from pillar.

        Fires a completion event with the updated schedule, drops the
        job's tracked interval, and (when ``persist`` is True) writes
        the schedule back to disk.
        '''
        # ensure job exists, then delete it
        if name in self.opts['schedule']:
            del self.opts['schedule'][name]
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot delete job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_delete_complete')

        # remove from self.intervals
        if name in self.intervals:
            del self.intervals[name]

        if persist:
            self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
    def delete_job_prefix(self, name, persist=True):
        '''
        Deletes a job from the scheduler. Ignores jobs from pillar.

        Every opts-based job whose name starts with ``name`` is
        removed; matching pillar jobs are only warned about. Tracked
        intervals for matching jobs are dropped as well.
        '''
        # ensure job exists, then delete it
        # list() the keys so we can delete while iterating
        for job in list(self.opts['schedule'].keys()):
            if job.startswith(name):
                del self.opts['schedule'][job]
        for job in self._get_schedule(include_opts=False):
            if job.startswith(name):
                log.warning("Cannot delete job %s, it's in the pillar!", job)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_delete_complete')

        # remove from self.intervals
        for job in list(self.intervals.keys()):
            if job.startswith(name):
                del self.intervals[job]

        if persist:
            self.persist()
    def add_job(self, data, persist=True):
        '''
        Adds a new job to the scheduler. The format is the same as required in
        the configuration file. See the docs on how YAML is interpreted into
        python data-structures to make sure, you pass correct dictionaries.

        ``data`` must be a dict with exactly one top-level key (the job
        name). Raises ValueError otherwise. Jobs shadowed by pillar are
        only warned about; existing opts jobs are updated in place.
        '''
        # we don't do any checking here besides making sure its a dict.
        # eval() already does for us and raises errors accordingly
        if not isinstance(data, dict):
            raise ValueError('Scheduled jobs have to be of type dict.')
        if not len(data) == 1:
            raise ValueError('You can only schedule one new job at a time.')

        # if enabled is not included in the job,
        # assume job is enabled.
        for job in data:
            if 'enabled' not in data[job]:
                data[job]['enabled'] = True

        new_job = next(six.iterkeys(data))

        if new_job in self._get_schedule(include_opts=False):
            log.warning("Cannot update job %s, it's in the pillar!", new_job)

        elif new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
            self.opts['schedule'].update(data)

        else:
            log.info('Added new job %s to scheduler', new_job)
            self.opts['schedule'].update(data)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_add_complete')

        if persist:
            self.persist()
    def enable_job(self, name, persist=True):
        '''
        Enable a job in the scheduler. Ignores jobs from pillar.

        Sets the job's ``enabled`` flag, fires a completion event with
        the updated schedule, and persists when requested.
        '''
        # ensure job exists, then enable it
        if name in self.opts['schedule']:
            self.opts['schedule'][name]['enabled'] = True
            log.info('Enabling job %s in scheduler', name)
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_enabled_job_complete')

        if persist:
            self.persist()
    def disable_job(self, name, persist=True):
        '''
        Disable a job in the scheduler. Ignores jobs from pillar.

        Clears the job's ``enabled`` flag, fires a completion event
        with the updated schedule, and persists when requested.
        '''
        # ensure job exists, then disable it
        if name in self.opts['schedule']:
            self.opts['schedule'][name]['enabled'] = False
            log.info('Disabling job %s in scheduler', name)
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_disabled_job_complete')

        if persist:
            self.persist()
    def modify_job(self, name, schedule, persist=True):
        '''
        Modify a job in the scheduler. Ignores jobs from pillar.

        Implemented as delete-then-replace: any existing opts job of
        the same name is removed before ``schedule`` is stored under
        ``name``. Pillar jobs are warned about and left untouched.
        '''
        # ensure job exists, then replace it
        if name in self.opts['schedule']:
            self.delete_job(name, persist)
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)
            return

        self.opts['schedule'][name] = schedule

        if persist:
            self.persist()
    def run_job(self, name):
        '''
        Run a schedule job now.

        Looks the job up in the merged schedule, accepts any of the
        'function'/'func'/'fun' aliases (single name or list), and runs
        each listed function via ``_run_job`` unless the job's ``run``
        flag is explicitly False.
        '''
        data = self._get_schedule().get(name, {})

        # The job's callable may appear under any of three aliases.
        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]
        for _func in func:
            # Unknown functions are logged but do not abort the loop.
            if _func not in self.functions:
                log.error(
                    'Invalid function: %s in scheduled job %s.',
                    _func, name
                )

            if 'name' not in data:
                data['name'] = name
            log.info('Running Job: %s', name)

            # Grab run, assume True
            run = data.get('run', True)
            if run:
                self._run_job(_func, data)
    def enable_schedule(self):
        '''
        Enable the scheduler.

        Sets the global ``enabled`` flag in the opts schedule and fires
        a completion event with the current schedule. Not persisted
        here; call ``persist`` separately if needed.
        '''
        self.opts['schedule']['enabled'] = True

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_enabled_complete')
    def disable_schedule(self):
        '''
        Disable the scheduler.

        Clears the global ``enabled`` flag in the opts schedule and
        fires a completion event with the current schedule.
        '''
        self.opts['schedule']['enabled'] = False

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
    def list(self, where):
        '''
        List the current schedule items.

        ``where`` selects the source: 'pillar' (pillar-only), 'opts'
        (opts-only), or anything else for the merged view. The result
        is delivered via the event bus, not returned.

        NOTE: the method name shadows the ``list`` builtin inside this
        class body; kept for API compatibility.
        '''
        if where == 'pillar':
            schedule = self._get_schedule(include_opts=False)
        elif where == 'opts':
            schedule = self._get_schedule(include_pillar=False)
        else:
            schedule = self._get_schedule()

        # Fire the complete event back along with the list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True, 'schedule': schedule},
                       tag='/salt/minion/minion_schedule_list_complete')
    def save_schedule(self):
        '''
        Save the current schedule to disk (via ``persist``) and fire a
        completion event.
        '''
        self.persist()

        # Fire the complete event back along with the list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True},
                       tag='/salt/minion/minion_schedule_saved')
    def postpone_job(self, name, data):
        '''
        Postpone a job in the scheduler.
        Ignores jobs from pillar.

        ``data`` must carry 'time' (the run to skip) and 'new_time'
        (the replacement run), optionally with 'time_fmt'. Implemented
        by appending a skip_explicit entry for the old time and a
        run_explicit entry for the new one.
        '''
        time = data['time']
        new_time = data['new_time']
        time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

        # ensure job exists, then disable it
        if name in self.opts['schedule']:
            if 'skip_explicit' not in self.opts['schedule'][name]:
                self.opts['schedule'][name]['skip_explicit'] = []
            self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                                 'time_fmt': time_fmt})

            if 'run_explicit' not in self.opts['schedule'][name]:
                self.opts['schedule'][name]['run_explicit'] = []
            self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
                                                                'time_fmt': time_fmt})

        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_postpone_job_complete')
    def skip_job(self, name, data):
        '''
        Skip a job at a specific time in the scheduler.
        Ignores jobs from pillar.

        ``data`` must carry 'time' (optionally 'time_fmt'); the time is
        appended to the job's skip_explicit list so eval() skips that
        run.
        '''
        time = data['time']
        time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

        # ensure job exists, then disable it
        if name in self.opts['schedule']:
            if 'skip_explicit' not in self.opts['schedule'][name]:
                self.opts['schedule'][name]['skip_explicit'] = []
            self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                                 'time_fmt': time_fmt})

        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_skip_job_complete')
    def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
        '''
        Return the next fire time for the specified job.

        Delivered via an event (formatted with ``fmt``), not returned;
        None is sent when the job has no recorded _next_fire_time.
        '''
        schedule = self._get_schedule()
        _next_fire_time = None
        if schedule:
            _next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
            if _next_fire_time:
                _next_fire_time = _next_fire_time.strftime(fmt)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
                       tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
'''
Execute this method in a multiprocess or thread
'''
if salt.utils.platform.is_windows() \
or self.opts.get('transport') == 'zeromq':
# Since function references can't be pickled and pickling
# is required when spawning new processes on Windows, regenerate
# the functions and returners.
# This also needed for ZeroMQ transport to reset all functions
# context data that could keep paretns connections. ZeroMQ will
# hang on polling parents connections from the child process.
if self.opts['__role'] == 'master':
self.functions = salt.loader.runner(self.opts, utils=self.utils)
else:
self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
ret = {'id': self.opts.get('id', 'master'),
'fun': func,
'fun_args': [],
'schedule': data['name'],
'jid': salt.utils.jid.gen_jid(self.opts)}
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
ret['metadata']['_TOS'] = self.time_offset
ret['metadata']['_TS'] = time.ctime()
ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
else:
log.warning('schedule: The metadata parameter must be '
'specified as a dictionary. Ignoring.')
if multiprocessing_enabled:
# We just want to modify the process name if we're on a different process
salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
data_returner = data.get('returner', None)
if not self.standalone:
proc_fn = os.path.join(
salt.minion.get_proc_dir(self.opts['cachedir']),
ret['jid']
)
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# Reconfigure multiprocessing logging after daemonizing
log_setup.setup_multiprocessing_logging()
if multiprocessing_enabled:
# Don't *BEFORE* to go into try to don't let it triple execute the finally section.
salt.utils.process.daemonize_if(self.opts)
# TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
try:
minion_blackout_violation = False
if self.opts.get('pillar', {}).get('minion_blackout', False):
whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
elif self.opts.get('grains', {}).get('minion_blackout', False):
whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
if func != 'saltutil.refresh_pillar' and func not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
ret['pid'] = os.getpid()
if not self.standalone:
if 'jid_include' not in data or data['jid_include']:
log.debug(
'schedule.handle_func: adding this job to the '
'jobcache with data %s', ret
)
# write this to /var/cache/salt/minion/proc
with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
fp_.write(salt.payload.Serial(self.opts).dumps(ret))
args = tuple()
if 'args' in data:
args = data['args']
ret['fun_args'].extend(data['args'])
kwargs = {}
if 'kwargs' in data:
kwargs = data['kwargs']
ret['fun_args'].append(copy.deepcopy(kwargs))
if func not in self.functions:
ret['return'] = self.functions.missing_fun_string(func)
salt.utils.error.raise_error(
message=self.functions.missing_fun_string(func))
# if the func support **kwargs, lets pack in the pub data we have
# TODO: pack the *same* pub data as a minion?
argspec = salt.utils.args.get_function_argspec(self.functions[func])
if argspec.keywords:
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(ret):
if key is not 'kwargs':
kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
# Only include these when running runner modules
if self.opts['__role'] == 'master':
jid = salt.utils.jid.gen_jid(self.opts)
tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
event = salt.utils.event.get_event(
self.opts['__role'],
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
namespaced_event = salt.utils.event.NamespacedEvent(
event,
tag,
print_func=None
)
func_globals = {
'__jid__': jid,
'__user__': salt.utils.user.get_user(),
'__tag__': tag,
'__jid_event__': weakref.proxy(namespaced_event),
}
self_functions = copy.copy(self.functions)
salt.utils.lazy.verify_fun(self_functions, func)
# Inject some useful globals to *all* the function's global
# namespace only once per module-- not per func
completed_funcs = []
for mod_name in six.iterkeys(self_functions):
if '.' not in mod_name:
continue
mod, _ = mod_name.split('.', 1)
if mod in completed_funcs:
continue
completed_funcs.append(mod)
for global_key, value in six.iteritems(func_globals):
self.functions[mod_name].__globals__[global_key] = value
self.functions.pack['__context__']['retcode'] = 0
ret['return'] = self.functions[func](*args, **kwargs)
if not self.standalone:
# runners do not provide retcode
if 'retcode' in self.functions.pack['__context__']:
ret['retcode'] = self.functions.pack['__context__']['retcode']
ret['success'] = True
if data_returner or self.schedule_returner:
if 'return_config' in data:
ret['ret_config'] = data['return_config']
if 'return_kwargs' in data:
ret['ret_kwargs'] = data['return_kwargs']
rets = []
for returner in [data_returner, self.schedule_returner]:
if isinstance(returner, six.string_types):
rets.append(returner)
elif isinstance(returner, list):
rets.extend(returner)
# simple de-duplication with order retained
for returner in OrderedDict.fromkeys(rets):
ret_str = '{0}.returner'.format(returner)
if ret_str in self.returners:
self.returners[ret_str](ret)
else:
log.info(
'Job %s using invalid returner: %s. Ignoring.',
func, returner
)
except Exception:
log.exception('Unhandled exception running %s', ret['fun'])
# Although catch-all exception handlers are bad, the exception here
# is to let the exception bubble up to the top of the thread context,
# where the thread will die silently, which is worse.
if 'return' not in ret:
ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
ret['success'] = False
ret['retcode'] = 254
finally:
# Only attempt to return data to the master if the scheduled job is running
# on a master itself or a minion.
if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
# The 'return_job' option is enabled by default even if not set
if 'return_job' in data and not data['return_job']:
pass
else:
# Send back to master so the job is included in the job list
mret = ret.copy()
# No returners defined, so we're only sending back to the master
if not data_returner and not self.schedule_returner:
mret['jid'] = 'req'
if data.get('return_job') == 'nocache':
# overwrite 'req' to signal to master that
# this job shouldn't be stored
mret['jid'] = 'nocache'
load = {'cmd': '_return', 'id': self.opts['id']}
for key, value in six.iteritems(mret):
load[key] = value
if '__role' in self.opts and self.opts['__role'] == 'minion':
event = salt.utils.event.get_event('minion',
opts=self.opts,
listen=False)
elif '__role' in self.opts and self.opts['__role'] == 'master':
event = salt.utils.event.get_master_event(self.opts,
self.opts['sock_dir'])
try:
event.fire_event(load, '__schedule_return')
except Exception as exc:
log.exception('Unhandled exception firing __schedule_return event')
if not self.standalone:
log.debug('schedule.handle_func: Removing %s', proc_fn)
try:
os.unlink(proc_fn)
except OSError as exc:
if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
# EEXIST and ENOENT are OK because the file is gone and that's what
# we wanted
pass
else:
log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
# Otherwise, failing to delete this file is not something
# we can cleanly handle.
raise
finally:
if multiprocessing_enabled:
# Let's make sure we exit the process!
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
    '''
    Handle a schedule item with ``after``: the job may only run once
    the ``after`` timestamp has passed.  Sets ``data['run']`` plus the
    skip-bookkeeping keys when the job is held back.
    '''
    if not _WHEN_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}'.format(data['name']))
        log.error(data['_error'])
        return

    after = data['after']
    # NOTE(review): unlike _handle_range, a parse error here is not
    # caught -- a bad string raises out of the handler; confirm intended.
    if not isinstance(after, datetime.datetime):
        after = dateutil_parser.parse(after)

    if after >= now:
        log.debug(
            'After time has not passed skipping job: %s.',
            data['name']
        )
        data['_skip_reason'] = 'after_not_passed'
        data['_skipped_time'] = now
        data['_skipped'] = True
        data['run'] = False
    else:
        data['run'] = True
def _handle_until(data):
    '''
    Handle a schedule item carrying an ``until`` timestamp: the job is
    allowed to run only while the current time is still before
    ``until``.  Sets ``data['run']`` and the skip bookkeeping keys.
    '''
    if not _WHEN_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}'.format(data['name']))
        log.error(data['_error'])
        return

    deadline = data['until']
    if not isinstance(deadline, datetime.datetime):
        deadline = dateutil_parser.parse(deadline)

    if now < deadline:
        data['run'] = True
    else:
        log.debug(
            'Until time has passed skipping job: %s.',
            data['name']
        )
        data['_skip_reason'] = 'until_passed'
        data['_skipped_time'] = now
        data['_skipped'] = True
        data['run'] = False
def _chop_ms(dt):
    '''
    Return *dt* truncated to whole seconds (microsecond field zeroed).
    '''
    return dt.replace(microsecond=0)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch one scheduled job via ``handle_func``, honouring ``dry_run``
    and the configured execution model (background process, thread, or
    inline in the current process).

    :param func: iterable of function names; one spawn per entry.
    :param data: the job's schedule item dict.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Run inline in this process.
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        # NOTE(review): eval() passes a list here but run_job passes a
        # single function-name string; enumerate() over a string would
        # iterate characters -- confirm func is always list-like.
        for i, _func in enumerate(func):
            _data = copy.deepcopy(data)
            # When args is a list of per-function arg lists, pick the
            # i-th entry for this function.
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule._check_max_running
|
python
|
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts live invocations of this schedule item among the currently
    running jobs; when the count has reached ``data['maxrunning']``,
    ``data['run']`` is flipped to False and the skip is recorded.
    Returns the (possibly updated) ``data`` dict.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only entries for this schedule name whose
                # recorded pid is still alive.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
|
Return the schedule data structure
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L201-L246
| null |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.

    The first call (or any call with ``new_instance=True``) builds an
    instance and initializes it through ``__singleton_init__``; every
    other call returns the cached ``cls.instance``.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Caller asked for a private instance; leave the shared
            # singleton slot untouched.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    '''
    Intentionally a no-op: Python invokes ``__init__`` on every
    ``Schedule(...)`` call, even when ``__new__`` returned the cached
    singleton.  All real setup lives in ``__singleton_init__``.
    '''
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initialization of the Schedule singleton; called from
    ``__new__``, never from ``__init__``.
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Schedule-wide toggles; individual jobs may carry overrides.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        if hasattr(returners, '__getitem__'):
            # Already mapping-like (dict or lazy loader); use as-is.
            self.returners = returners
        else:
            # presumably a loader wrapper exposing gen_functions() --
            # TODO confirm which callers hit this branch
            self.returners = returners.loader.gen_functions()
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    '''
    Pickling support: the positional arguments re-supplied to
    ``__new__`` when the instance is reconstructed --
    (opts, functions, returners, intervals, cleanup=None).
    '''
    state = (self.opts, self.functions, self.returners, self.intervals)
    return state + (None,)
def option(self, opt):
    '''
    Look up a scheduler option.

    When the loaded functions provide ``config.merge``, delegate to it
    so minion config and pillar are merged; otherwise fall back to a
    plain lookup in ``self.opts`` with an empty-dict default.
    '''
    if 'config.merge' not in self.functions:
        return self.opts.get(opt, {})
    return self.functions['config.merge'](opt, {}, omit_master=True)
def _get_schedule(self,
                  include_opts=True,
                  include_pillar=True,
                  remove_hidden=False):
    '''
    Build the effective schedule by merging pillar- and opts-defined
    jobs (opts entries win on name clashes).

    With ``remove_hidden`` the bookkeeping keys (leading underscore)
    are stripped from every job definition.

    :raises ValueError: when either source's schedule is not a dict.
    '''
    schedule = {}
    sources = []
    if include_pillar:
        sources.append(self.opts.get('pillar', {}).get('schedule', {}))
    if include_opts:
        sources.append(self.opts.get('schedule', {}))
    for source in sources:
        if not isinstance(source, dict):
            raise ValueError('Schedule must be of type dict.')
        schedule.update(source)

    if remove_hidden:
        # Iterate a snapshot so we can delete from the live dict.
        snapshot = copy.deepcopy(schedule)
        for job in snapshot:
            if isinstance(snapshot[job], dict):
                for item in snapshot[job]:
                    if item.startswith('_'):
                        del schedule[job][item]
    return schedule
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

    Only opts-defined jobs are written (pillar excluded) and internal
    '_'-prefixed bookkeeping keys are stripped.  Write failures are
    logged rather than raised.
    '''
    # Resolve the configuration directory: explicit conf_dir, then the
    # directory of conf_file, then the compiled-in default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignore jobs from pillar

    :param str name: job to delete.
    :param bool persist: also write the updated schedule to disk.
    '''
    # ensure job exists, then delete it
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    if name in self.intervals:
        del self.intervals[name]

    if persist:
        self.persist()
def reset(self):
    '''
    Restore the scheduler's global settings to their defaults and wipe
    the in-memory schedule held in ``self.opts``.
    '''
    self.skip_function = self.skip_during_range = self.splay = None
    self.enabled = True
    self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignores jobs from pillar

    Every opts-defined job whose name starts with ``name`` is removed;
    matching pillar jobs only produce a warning.
    '''
    # ensure job exists, then delete it (list() so we can delete while
    # iterating)
    for job in list(self.opts['schedule'].keys()):
        if job.startswith(name):
            del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    for job in list(self.intervals.keys()):
        if job.startswith(name):
            del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Adds a new job to the scheduler. The format is the same as required in
    the configuration file. See the docs on how YAML is interpreted into
    python data-structures to make sure, you pass correct dictionaries.

    :param dict data: single-entry dict mapping the job name to its
        schedule definition.
    :param bool persist: also write the updated schedule to disk.
    :raises ValueError: when ``data`` is not a dict or holds more than
        one job.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if not len(data) == 1:
        raise ValueError('You can only schedule one new job at a time.')

    # if enabled is not included in the job,
    # assume job is enabled.
    for job in data:
        if 'enabled' not in data[job]:
            data[job]['enabled'] = True

    new_job = next(six.iterkeys(data))

    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)
    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark the named job as enabled.  Only opts-defined jobs can be
    changed; matching pillar jobs just log a warning.  Fires a
    completion event and optionally persists the schedule.
    '''
    if name in self.opts['schedule']:
        log.info('Enabling job %s in scheduler', name)
        self.opts['schedule'][name]['enabled'] = True
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Broadcast the updated schedule so listeners see the change.
    payload = {'complete': True,
               'schedule': self._get_schedule()}
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(payload,
                         tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Disable a job in the scheduler. Ignores jobs from pillar

    :param str name: job to disable.
    :param bool persist: also write the updated schedule to disk.
    '''
    # ensure job exists, then disable it
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Modify a job in the scheduler. Ignores jobs from pillar

    :param str name: job to replace.
    :param dict schedule: the job's new definition.
    :param bool persist: also write the updated schedule to disk.
    '''
    # ensure job exists, then replace it
    if name in self.opts['schedule']:
        # delete_job also fires the delete event and (when persist is
        # True) writes the schedule to disk before we re-add the job.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now

    Looks the job up in the merged schedule, resolves its function
    (accepted under 'function', 'func' or 'fun', scalar or list) and
    hands each entry to ``_run_job`` unless the item's ``run`` flag is
    False.
    '''
    data = self._get_schedule().get(name, {})

    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            # Logged but not fatal -- the run below is still attempted.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            # NOTE(review): _run_job enumerates its func argument;
            # here a single name string is passed -- confirm that is
            # handled as intended.
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Enable the scheduler globally.

    Sets the top-level ``enabled`` flag in the in-memory schedule and
    fires an event carrying the updated schedule.
    '''
    # setdefault guards against opts lacking a 'schedule' key entirely
    # (the original indexing raised KeyError in that case); reload()
    # already uses the same pattern.
    self.opts.setdefault('schedule', {})['enabled'] = True

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Disable the scheduler globally.

    Clears the top-level ``enabled`` flag in the in-memory schedule and
    fires an event carrying the updated schedule.
    '''
    # setdefault guards against opts lacking a 'schedule' key entirely
    # (the original indexing raised KeyError in that case); reload()
    # already uses the same pattern.
    self.opts.setdefault('schedule', {})['enabled'] = False

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge a freshly loaded schedule (as read from the saved schedule
    file) into ``self.opts`` and reset interval bookkeeping.
    '''
    # Drop all cached interval state; it is rebuilt on evaluation.
    self.intervals = {}

    # Accept either the bare job mapping or a {'schedule': {...}} wrapper.
    jobs = schedule['schedule'] if 'schedule' in schedule else schedule
    if 'schedule' not in self.opts:
        self.opts['schedule'] = {}
    self.opts['schedule'].update(jobs)
def list(self, where):
    '''
    Fire an event containing the current schedule items.

    ``where`` selects the source: 'pillar' (pillar-only), 'opts'
    (minion-config-only); anything else merges both.
    '''
    source_kwargs = {'pillar': {'include_opts': False},
                     'opts': {'include_pillar': False}}
    schedule = self._get_schedule(**source_kwargs.get(where, {}))

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Save the current schedule

    Writes the schedule to disk via ``persist`` and fires a completion
    event.
    '''
    self.persist()

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler.
    Ignores jobs from pillar

    ``data['time']`` names the originally scheduled run to skip and
    ``data['new_time']`` the replacement run time; both interpreted
    with ``data['time_fmt']`` (default ISO-8601 without zone).
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    # ensure job exists, then record the postponement
    if name in self.opts['schedule']:
        # Skip the original run time...
        if 'skip_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['skip_explicit'] = []
        self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                             'time_fmt': time_fmt})

        # ...and schedule an explicit run at the new time.
        if 'run_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['run_explicit'] = []
        self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
                                                            'time_fmt': time_fmt})

    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip a job at a specific time in the scheduler.
    Ignores jobs from pillar

    ``data['time']`` names the run to skip, formatted per
    ``data['time_fmt']`` (default ISO-8601 without zone).
    '''
    time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    # ensure job exists, then record the skip entry
    if name in self.opts['schedule']:
        if 'skip_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['skip_explicit'] = []
        self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                             'time_fmt': time_fmt})

    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Return the next fire time for the specified job

    The value is reported on the event bus (formatted with ``fmt``),
    not returned to the caller; ``None`` is sent when unknown.
    '''
    schedule = self._get_schedule()
    _next_fire_time = None
    if schedule:
        _next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
        if _next_fire_time:
            _next_fire_time = _next_fire_time.strftime(fmt)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the current definition of the named schedule item, or an
    empty dict when no such job exists.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute this method in a multiprocess or thread

    Runs one scheduled function invocation: regenerates loaders where
    pickling/transport requires it, records the job in the proc dir,
    calls the function, routes the result to any configured returners,
    fires a '__schedule_return' event, and cleans up afterwards.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This is also needed for the ZeroMQ transport, to reset all
        # function context data that could keep parent connections.
        # ZeroMQ will hang on polling parent connections from the
        # child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            # NOTE(review): '%H %m' renders hour + MONTH; minute would
            # be '%M' -- confirm the format is intentional.
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Daemonize before entering the try block so the finally
        # section cannot execute multiple times.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        # Blackout mode: refuse to run anything except
        # saltutil.refresh_pillar and whitelisted functions.
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # NOTE(review): 'is not' compares identity with a str
                # literal; this should be '!=' (works on CPython via
                # interning, but is a SyntaxWarning on 3.8+).
                if key is not 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
        def _handle_time_elements(data):
            '''
            Handle a schedule item defined with time elements
            (seconds, minutes, hours, days).

            Collapses the elements into a cached total (``data['_seconds']``),
            derives the next fire time from it, and shrinks the scheduler's
            loop interval so this job is polled often enough.  Mutates
            ``data`` in place.
            '''
            if '_seconds' not in data:
                # Sum all time elements into a single second count; missing
                # elements default to 0.
                interval = int(data.get('seconds', 0))
                interval += int(data.get('minutes', 0)) * 60
                interval += int(data.get('hours', 0)) * 3600
                interval += int(data.get('days', 0)) * 86400

                data['_seconds'] = interval

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

                # Wake the scheduler at least this often.
                if interval < self.loop_interval:
                    self.loop_interval = interval

            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        def _handle_once(data, loop_interval):
            '''
            Handle a schedule item that fires exactly once, at the time in
            ``data['once']`` (a datetime, or a string parsed with
            ``once_fmt``, default ISO-8601).  Mutates ``data`` in place and
            sets ``data['_error']`` on a parse failure.
            '''
            if data['_next_fire_time']:
                # NOTE(review): 'and' binds tighter than 'or', so this reads
                # A or (B and C) -- confirm (A or B) and C was not intended.
                if data['_next_fire_time'] < now - loop_interval or \
                        data['_next_fire_time'] > now and \
                        not data['_splay']:
                    data['_continue'] = True

            if not data['_next_fire_time'] and \
                    not data['_splay']:
                once = data['once']
                if not isinstance(once, datetime.datetime):
                    once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                    try:
                        once = datetime.datetime.strptime(data['once'],
                                                          once_fmt)
                    except (TypeError, ValueError):
                        data['_error'] = ('Date string could not '
                                          'be parsed: {0}, {1}. '
                                          'Ignoring job {2}.'.format(
                                              data['once'],
                                              once_fmt,
                                              data['name']))
                        log.error(data['_error'])
                        return
                data['_next_fire_time'] = once
                data['_next_scheduled_fire_time'] = once
                # If _next_fire_time is less than now, continue
                if once < now - loop_interval:
                    data['_continue'] = True
        def _handle_when(data, loop_interval):
            '''
            Handle a schedule item with ``when``: one timestamp or a list of
            timestamps, each given directly or as a key into named "whens"
            looked up from pillar or grains.  Requires python-dateutil for
            string parsing.  Mutates ``data`` in place.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            # Normalize to a list of raw "when" entries.
            if not isinstance(data['when'], list):
                _when_data = [data['when']]
            else:
                _when_data = data['when']

            _when = []
            for i in _when_data:
                # Resolve named whens: pillar takes precedence over grains.
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        i in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'],
                                      dict):
                        data['_error'] = ('Pillar item "whens" '
                                          'must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['pillar']['whens'][i]
                elif ('whens' in self.opts['grains'] and
                        i in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'],
                                      dict):
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['grains']['whens'][i]
                else:
                    when_ = i

                if not isinstance(when_, datetime.datetime):
                    try:
                        when_ = dateutil_parser.parse(when_)
                    except ValueError:
                        data['_error'] = ('Invalid date string {0}. '
                                          'Ignoring job {1}.'.format(i, data['name']))
                        log.error(data['_error'])
                        return

                _when.append(when_)

            if data['_splay']:
                _when.append(data['_splay'])

            # Sort the list of "whens" from earlier to later schedules
            _when.sort()

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_when):
                if len(_when) > 1:
                    if i < now - loop_interval:
                        # Remove all missed schedules except the latest one.
                        # We need it to detect if it was triggered previously.
                        _when.remove(i)

            if _when:
                # Grab the first element, which is the next run time or
                # last scheduled time in the past.
                when = _when[0]

                if when < now - loop_interval and \
                        not data.get('_run', False) and \
                        not data.get('run', False) and \
                        not data['_splay']:
                    data['_next_fire_time'] = None
                    data['_continue'] = True
                    return

                if '_run' not in data:
                    # Prevent run of jobs from the past
                    data['_run'] = bool(when >= now - loop_interval)

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = when
                    data['_next_scheduled_fire_time'] = when

                # 'run' here is the closure variable from the eval() loop.
                if data['_next_fire_time'] < when and \
                        not run and \
                        not data['_run']:
                    data['_next_fire_time'] = when
                    data['_run'] = True

            elif not data.get('_run', False):
                data['_next_fire_time'] = None
                data['_continue'] = True
        def _handle_cron(data, loop_interval):
            '''
            Handle a schedule item with a ``cron`` expression: compute the
            next fire time with croniter when none is cached.  Requires
            python-croniter; mutates ``data`` in place.
            '''
            if not _CRON_SUPPORTED:
                data['_error'] = ('Missing python-croniter. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if data['_next_fire_time'] is None:
                # Get next time frame for a "cron" job if it has been never
                # executed before or already executed in the past.
                try:
                    data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                    data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                except (ValueError, KeyError):
                    data['_error'] = ('Invalid cron string. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

                # If next job run is scheduled more than 1 minute ahead and
                # configured loop interval is longer than that, we should
                # shorten it to get our job executed closer to the beginning
                # of desired time.
                # NOTE(review): _next_fire_time is in the future here, so
                # (now - _next_fire_time) is negative and this condition looks
                # never-true; confirm the subtraction order is intended.
                interval = (now - data['_next_fire_time']).total_seconds()
                if interval >= 60 and interval < self.loop_interval:
                    self.loop_interval = interval
        def _handle_run_explicit(data, loop_interval):
            '''
            Handle a schedule item with ``run_explicit``: a list of exact run
            times, each a datetime or a {'time': ..., 'time_fmt': ...} dict.
            Sets ``data['run']`` True only when the earliest remaining time
            falls within the current loop interval.  Mutates ``data``.
            '''
            _run_explicit = []
            for _run_time in data['run_explicit']:
                if isinstance(_run_time, datetime.datetime):
                    _run_explicit.append(_run_time)
                else:
                    _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                    _run_time['time_fmt']))

            data['run'] = False

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_run_explicit):
                # Drop stale entries, but always keep at least one.
                if len(_run_explicit) > 1:
                    if i < now - loop_interval:
                        _run_explicit.remove(i)

            if _run_explicit:
                if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                    data['run'] = True
                    data['_next_fire_time'] = _run_explicit[0]
        def _handle_skip_explicit(data, loop_interval):
            '''
            Handle a schedule item with ``skip_explicit``: a list of exact
            times at which the job must be skipped (or replaced by the
            configured skip_function).  Each entry is a datetime or a
            {'time': ..., 'time_fmt': ...} dict.  Mutates ``data``.
            '''
            data['run'] = False

            _skip_explicit = []
            for _skip_time in data['skip_explicit']:
                if isinstance(_skip_time, datetime.datetime):
                    _skip_explicit.append(_skip_time)
                else:
                    _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                     _skip_time['time_fmt']))

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_skip_explicit):
                # Drop skip times already in the past.
                if i < now - loop_interval:
                    _skip_explicit.remove(i)

            if _skip_explicit:
                if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                    # Inside the skip window: either run the skip_function
                    # instead, or record the skip.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'skip_explicit'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle a schedule item with ``skip_during_range``: a dict with
            ``start``/``end`` (datetimes or parseable strings) during which
            the job is skipped (or replaced by the configured skip_function).
            Optionally schedules an immediate run right after the range via
            ``run_after_skip_range``.  Requires python-dateutil; mutates
            ``data`` in place.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
               data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_range(data):
            '''
            Handle a schedule item with ``range``: a dict with ``start`` and
            ``end`` (datetimes or parseable strings) limiting when the job
            may run.  With ``invert`` set, the job runs only OUTSIDE the
            range.  Requires python-dateutil; mutates ``data`` in place.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only outside [start, end].
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        # Outside the allowed range: run the skip_function
                        # instead if configured, otherwise record the skip.
                        if self.skip_function:
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Launch a scheduled job, honoring the configured execution model.

        ``func`` is a list of function names; ``data`` is the job's schedule
        entry.  Depending on opts, the job runs inline (foreground), in a
        thread, or in a separate process.  Jobs with ``dry_run`` set are
        logged and not executed.
        '''
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            # One child per function in the list; with an 'args' list, each
            # function gets the args entry at its own index.
            for i, _func in enumerate(func):
                _data = copy.deepcopy(data)
                if 'args' in _data and isinstance(_data['args'], list):
                    _data['args'] = _data['args'][i]
                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.persist
|
python
|
def persist(self):
'''
Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
'''
config_dir = self.opts.get('conf_dir', None)
if config_dir is None and 'conf_file' in self.opts:
config_dir = os.path.dirname(self.opts['conf_file'])
if config_dir is None:
config_dir = salt.syspaths.CONFIG_DIR
minion_d_dir = os.path.join(
config_dir,
os.path.dirname(self.opts.get('default_include',
salt.config.DEFAULT_MINION_OPTS['default_include'])))
if not os.path.isdir(minion_d_dir):
os.makedirs(minion_d_dir)
schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
log.debug('Persisting schedule')
schedule_data = self._get_schedule(include_pillar=False,
remove_hidden=True)
try:
with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
fp_.write(
salt.utils.stringutils.to_bytes(
salt.utils.yaml.safe_dump(
{'schedule': schedule_data}
)
)
)
except (IOError, OSError):
log.error('Failed to persist the updated schedule',
exc_info_on_loglevel=logging.DEBUG)
|
Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L248-L281
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n",
"def safe_dump(data, stream=None, **kwargs):\n '''\n Use a custom dumper to ensure that defaultdict and OrderedDict are\n represented properly. Ensure that unicode strings are encoded unless\n explicitly told not to.\n '''\n if 'allow_unicode' not in kwargs:\n kwargs['allow_unicode'] = True\n return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule.

        Acts as a singleton factory: the first call (or any call with
        ``new_instance=True``) builds and initializes a fresh object via
        ``__singleton_init__``; subsequent calls return the cached
        ``cls.instance``.  ``new_instance=True`` instances are NOT cached.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        '''
        Intentionally a no-op: Python always calls ``__init__`` on the object
        returned by ``__new__``, so real initialization lives in
        ``__singleton_init__`` to avoid re-initializing the cached singleton.
        '''
        pass
# an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        Real initializer for the singleton, invoked once from ``__new__``.

        Wires up opts, loaded execution modules/returners/utils, the global
        skip/splay/enabled defaults, and — unless running standalone — cleans
        the proc dir and deletes any jobs matching the ``cleanup`` prefixes.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept either a mapping of returners or a loader object that
            # can generate one.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
    def __getnewargs__(self):
        '''
        Arguments handed back to ``__new__`` when an instance is unpickled.
        '''
        return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce a job's ``maxrunning`` limit.

        Counts currently-running instances of this scheduled job (on master
        or minion, per ``__role``) and, when the count reaches
        ``data['maxrunning']``, marks the job as skipped and clears
        ``data['run']``.  Returns the (possibly mutated) ``data`` dict.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True

        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # Same schedule name and the recorded pid is still alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def delete_job(self, name, persist=True):
        '''
        Deletes a job from the scheduler. Ignore jobs from pillar
        (pillar-defined jobs can only be warned about, not removed here).
        Fires a completion event with the updated schedule and, with
        ``persist``, writes the schedule back to disk.
        '''
        # ensure job exists, then delete it
        if name in self.opts['schedule']:
            del self.opts['schedule'][name]
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot delete job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_delete_complete')

        # remove from self.intervals
        if name in self.intervals:
            del self.intervals[name]

        if persist:
            self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every job whose name starts with ``name`` from the scheduler.

    Pillar-defined jobs are never deleted; a warning is logged for each
    match instead. A completion event with the updated schedule is
    always fired; the result is optionally persisted.
    '''
    # Purge matching jobs held in opts.
    for job_name in [j for j in self.opts['schedule'] if j.startswith(name)]:
        del self.opts['schedule'][job_name]

    # Pillar jobs are read-only from our point of view.
    for job_name in self._get_schedule(include_opts=False):
        if job_name.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job_name)

    # Notify listeners that the deletion finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_delete_complete')

    # Clear interval bookkeeping for every matching job.
    for job_name in [j for j in self.intervals if j.startswith(name)]:
        del self.intervals[job_name]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Add a new job to the scheduler.

    ``data`` must be a dict holding exactly one job, in the same format
    used in the configuration file. A job already present in opts is
    updated in place; a job shadowed by the pillar is left untouched
    (with a warning). A completion event with the updated schedule is
    always fired; the result is optionally persisted.
    '''
    # eval() upstream already validates the payload; re-check the basics.
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')

    # Jobs default to enabled when the flag is absent.
    for job_name in data:
        data[job_name].setdefault('enabled', True)

    new_job = next(iter(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    else:
        if new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
        else:
            log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Notify listeners that the add/update finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark a scheduled job as enabled.

    Pillar-defined jobs cannot be modified; a warning is logged instead.
    A completion event with the updated schedule is always fired.
    '''
    # Flip the flag only for jobs we own (opts, not pillar).
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Notify listeners that the change finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark a scheduled job as disabled.

    Pillar-defined jobs cannot be modified; a warning is logged instead.
    A completion event with the updated schedule is always fired.
    '''
    # Flip the flag only for jobs we own (opts, not pillar).
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Notify listeners that the change finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
'''
Modify a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then replace it
if name in self.opts['schedule']:
self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
return
self.opts['schedule'][name] = schedule
if persist:
self.persist()
def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = True

    # Notify listeners that the scheduler is now enabled.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = False

    # Notify listeners that the scheduler is now disabled.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge a schedule loaded from the saved schedule file into opts,
    clearing all interval bookkeeping first.

    Accepts either the raw job mapping or a ``{'schedule': {...}}``
    wrapper.
    '''
    # Forget every previously tracked interval.
    self.intervals = {}

    # Unwrap the optional top-level 'schedule' key.
    if 'schedule' in schedule:
        schedule = schedule['schedule']
    self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Broadcast the current schedule items over the event bus.

    ``where`` selects the source: ``'pillar'`` for pillar-only jobs,
    ``'opts'`` for opts-only jobs, anything else for the merged view.
    '''
    if where == 'pillar':
        selected = self._get_schedule(include_opts=False)
    elif where == 'opts':
        selected = self._get_schedule(include_pillar=False)
    else:
        selected = self._get_schedule()

    # Notify listeners with the selected schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': selected},
                         tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk and announce completion on the
    event bus.
    '''
    self.persist()

    # Notify listeners that the schedule was saved.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True},
                         tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a single run of a scheduled job.

    Records the original run time (``data['time']``) as an explicit
    skip and the replacement time (``data['new_time']``) as an explicit
    run; both are interpreted with ``data['time_fmt']`` when given.
    Pillar-defined jobs cannot be modified. Fires a completion event
    with the updated schedule.
    '''
    old_time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Skip the originally scheduled run...
        job.setdefault('skip_explicit', []).append(
            {'time': old_time, 'time_fmt': time_fmt})
        # ...and run at the replacement time instead.
        job.setdefault('run_explicit', []).append(
            {'time': new_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Notify listeners that the postponement finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip a single scheduled run of job ``name``.

    ``data['time']`` (optionally with ``data['time_fmt']``) identifies
    the run to skip. Pillar-defined jobs cannot be modified. Fires a
    completion event with the updated schedule.
    '''
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        # Record the run to skip as an explicit skip entry.
        self.opts['schedule'][name].setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Notify listeners that the skip was recorded.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Broadcast the next fire time of job ``name`` over the event bus,
    formatted with ``fmt``; ``None`` when the job or its fire time is
    unknown.
    '''
    formatted = None
    schedule = self._get_schedule()
    if schedule:
        fire_time = schedule.get(name, {}).get('_next_fire_time', None)
        if fire_time:
            formatted = fire_time.strftime(fmt)

    # Notify listeners of the computed fire time.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'next_fire_time': formatted},
                         tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the definition of the named schedule item, or an empty dict
    when the job is unknown.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled function in a spawned process or thread.

    multiprocessing_enabled
        True when the job runs in its own process: the process title is
        updated, multiprocessing logging is reconfigured, and the process
        exits once the job completes (see the outer ``finally``).
    func
        Name of the function to run, looked up in ``self.functions``.
    data
        The schedule item definition (args, kwargs, returner, metadata,
        jid_include, return_job, ...).

    Results are written to the minion proc dir (unless ``standalone``),
    sent through any configured returners, and fired back to the master
    via the ``__schedule_return`` event.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        try:
            minion_blackout_violation = False
            if self.opts.get('pillar', {}).get('minion_blackout', False):
                whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            elif self.opts.get('grains', {}).get('minion_blackout', False):
                whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            ret['pid'] = os.getpid()

            if not self.standalone:
                if 'jid_include' not in data or data['jid_include']:
                    log.debug(
                        'schedule.handle_func: adding this job to the '
                        'jobcache with data %s', ret
                    )
                    # write this to /var/cache/salt/minion/proc
                    with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                        fp_.write(salt.payload.Serial(self.opts).dumps(ret))

            args = tuple()
            if 'args' in data:
                args = data['args']
                ret['fun_args'].extend(data['args'])

            kwargs = {}
            if 'kwargs' in data:
                kwargs = data['kwargs']
                ret['fun_args'].append(copy.deepcopy(kwargs))

            if func not in self.functions:
                ret['return'] = self.functions.missing_fun_string(func)
                salt.utils.error.raise_error(
                    message=self.functions.missing_fun_string(func))

            # if the func support **kwargs, lets pack in the pub data we have
            # TODO: pack the *same* pub data as a minion?
            argspec = salt.utils.args.get_function_argspec(self.functions[func])
            if argspec.keywords:
                # this function accepts **kwargs, pack in the publish data
                for key, val in six.iteritems(ret):
                    # Use equality, not identity: `key is not 'kwargs'` only
                    # worked by CPython string-interning accident.
                    if key != 'kwargs':
                        kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

            # Only include these when running runner modules
            if self.opts['__role'] == 'master':
                jid = salt.utils.jid.gen_jid(self.opts)
                tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

                event = salt.utils.event.get_event(
                    self.opts['__role'],
                    self.opts['sock_dir'],
                    self.opts['transport'],
                    opts=self.opts,
                    listen=False)

                namespaced_event = salt.utils.event.NamespacedEvent(
                    event,
                    tag,
                    print_func=None
                )

                func_globals = {
                    '__jid__': jid,
                    '__user__': salt.utils.user.get_user(),
                    '__tag__': tag,
                    '__jid_event__': weakref.proxy(namespaced_event),
                }
                self_functions = copy.copy(self.functions)
                salt.utils.lazy.verify_fun(self_functions, func)

                # Inject some useful globals to *all* the function's global
                # namespace only once per module-- not per func
                completed_funcs = []
                for mod_name in six.iterkeys(self_functions):
                    if '.' not in mod_name:
                        continue
                    mod, _ = mod_name.split('.', 1)
                    if mod in completed_funcs:
                        continue
                    completed_funcs.append(mod)
                    for global_key, value in six.iteritems(func_globals):
                        self.functions[mod_name].__globals__[global_key] = value

            self.functions.pack['__context__']['retcode'] = 0
            ret['return'] = self.functions[func](*args, **kwargs)

            if not self.standalone:
                # runners do not provide retcode
                if 'retcode' in self.functions.pack['__context__']:
                    ret['retcode'] = self.functions.pack['__context__']['retcode']

                ret['success'] = True

                if data_returner or self.schedule_returner:
                    if 'return_config' in data:
                        ret['ret_config'] = data['return_config']
                    if 'return_kwargs' in data:
                        ret['ret_kwargs'] = data['return_kwargs']
                    rets = []
                    for returner in [data_returner, self.schedule_returner]:
                        if isinstance(returner, six.string_types):
                            rets.append(returner)
                        elif isinstance(returner, list):
                            rets.extend(returner)
                    # simple de-duplication with order retained
                    for returner in OrderedDict.fromkeys(rets):
                        ret_str = '{0}.returner'.format(returner)
                        if ret_str in self.returners:
                            self.returners[ret_str](ret)
                        else:
                            log.info(
                                'Job %s using invalid returner: %s. Ignoring.',
                                func, returner
                            )
        except Exception:
            log.exception('Unhandled exception running %s', ret['fun'])
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
            if 'return' not in ret:
                ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
            ret['success'] = False
            ret['retcode'] = 254
        finally:
            # Only attempt to return data to the master if the scheduled job is running
            # on a master itself or a minion.
            if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
                # The 'return_job' option is enabled by default even if not set
                if 'return_job' in data and not data['return_job']:
                    pass
                else:
                    # Send back to master so the job is included in the job list
                    mret = ret.copy()
                    # No returners defined, so we're only sending back to the master
                    if not data_returner and not self.schedule_returner:
                        mret['jid'] = 'req'
                        if data.get('return_job') == 'nocache':
                            # overwrite 'req' to signal to master that
                            # this job shouldn't be stored
                            mret['jid'] = 'nocache'
                    load = {'cmd': '_return', 'id': self.opts['id']}
                    for key, value in six.iteritems(mret):
                        load[key] = value

                    if '__role' in self.opts and self.opts['__role'] == 'minion':
                        event = salt.utils.event.get_event('minion',
                                                           opts=self.opts,
                                                           listen=False)
                    elif '__role' in self.opts and self.opts['__role'] == 'master':
                        event = salt.utils.event.get_master_event(self.opts,
                                                                  self.opts['sock_dir'])
                    try:
                        event.fire_event(load, '__schedule_return')
                    except Exception:
                        log.exception('Unhandled exception firing __schedule_return event')

            if not self.standalone:
                log.debug('schedule.handle_func: Removing %s', proc_fn)
                try:
                    os.unlink(proc_fn)
                except OSError as exc:
                    if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                        # EEXIST and ENOENT are OK because the file is gone and that's what
                        # we wanted
                        pass
                    else:
                        log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                        # Otherwise, failing to delete this file is not something
                        # we can cleanly handle.
                        raise
    finally:
        if multiprocessing_enabled:
            # Let's make sure we exit the process!
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Dispatch a scheduled job to worker processes or threads.

        ``func`` is expected to be a list of function names (callers build it
        with ``[func]`` when a single name is given); ``data`` is the job's
        schedule definition dict.
        '''
        # Honor a job-level dry_run flag: log and bail out without executing.
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Run inline in this process/thread.
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            # Fan out one worker per listed function, pairing each with the
            # matching positional-args entry when 'args' is a list of lists.
            # NOTE(review): if a caller ever passes a plain string here,
            # enumerate() would iterate its characters — confirm all callers
            # wrap single names in a list.
            for i, _func in enumerate(func):
                _data = copy.deepcopy(data)
                if 'args' in _data and isinstance(_data['args'], list):
                    _data['args'] = _data['args'][i]
                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.delete_job
|
python
|
def delete_job(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignore jobs from pillar
'''
# ensure job exists, then delete it
if name in self.opts['schedule']:
del self.opts['schedule'][name]
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot delete job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
if name in self.intervals:
del self.intervals[name]
if persist:
self.persist()
|
Deletes a job from the scheduler. Ignore jobs from pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L283-L304
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n",
"def persist(self):\n '''\n Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf\n '''\n config_dir = self.opts.get('conf_dir', None)\n if config_dir is None and 'conf_file' in self.opts:\n config_dir = os.path.dirname(self.opts['conf_file'])\n if config_dir is None:\n config_dir = salt.syspaths.CONFIG_DIR\n\n minion_d_dir = os.path.join(\n config_dir,\n os.path.dirname(self.opts.get('default_include',\n salt.config.DEFAULT_MINION_OPTS['default_include'])))\n\n if not os.path.isdir(minion_d_dir):\n os.makedirs(minion_d_dir)\n\n schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')\n log.debug('Persisting schedule')\n schedule_data = self._get_schedule(include_pillar=False,\n remove_hidden=True)\n try:\n with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:\n fp_.write(\n salt.utils.stringutils.to_bytes(\n salt.utils.yaml.safe_dump(\n {'schedule': schedule_data}\n )\n )\n )\n except (IOError, OSError):\n log.error('Failed to persist the updated schedule',\n exc_info_on_loglevel=logging.DEBUG)\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Enforce singleton behavior: reuse the cached class-level instance
        unless ``new_instance`` is True, in which case a fresh, independent
        instance is built and returned without replacing the singleton.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                # Private instance requested: hand it back without touching
                # the shared singleton slot.
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        # Intentionally a no-op: because Schedule is a singleton, __init__
        # runs on every Schedule(...) call, so all real setup happens exactly
        # once in __singleton_init__ (invoked from __new__).
        pass
# an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        Real initializer, run once per instance by ``__new__``.

        Stores the opts/functions/returners plumbing, seeds scheduler-wide
        defaults (enabled, splay, skip windows, interval tracking) and —
        unless running standalone — cleans the proc dir and deletes any job
        prefixes listed in ``cleanup``.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        # Load the utility modules ourselves when the caller did not hand us
        # a pre-built utils dict.
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            if hasattr(returners, '__getitem__'):
                # Already mapping-like (e.g. a loader); use it directly.
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        # Timezone offset used to stamp job metadata (_TOS).
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
    def __getnewargs__(self):
        # Arguments handed to __new__ when unpickling. The final positional
        # slot (cleanup) is dropped to None; it is only meaningful at first
        # construction.
        return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce the job's ``maxrunning`` limit.

        Counts currently-running jobs carrying this schedule name whose pid
        is still alive and, when the limit is reached, flips ``data['run']``
        to False and records the skip reason/time. Returns the (possibly
        updated) job data dict.

        The ``opts`` parameter is unused here; it is kept for call-site
        compatibility.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            # Master and minion keep their running-job registries in
            # different places.
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # Only count entries for this schedule name whose
                    # recorded pid is still alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

        Only opts-defined jobs are written; pillar jobs and hidden
        bookkeeping keys (leading underscore) are excluded. Failure to write
        is logged, never raised.
        '''
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR

        # The schedule lives alongside the other drop-in minion config
        # snippets (e.g. /etc/salt/minion.d).
        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))

        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)

        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            # Best effort: persistence failure is logged, not raised.
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then delete it
for job in list(self.opts['schedule'].keys()):
if job.startswith(name):
del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
if job.startswith(name):
log.warning("Cannot delete job %s, it's in the pillar!", job)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
for job in list(self.intervals.keys()):
if job.startswith(name):
del self.intervals[job]
if persist:
self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
# if enabled is not included in the job,
# assume job is enabled.
for job in data:
if 'enabled' not in data[job]:
data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
log.warning("Cannot update job %s, it's in the pillar!", new_job)
elif new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled job: %s', new_job)
self.opts['schedule'].update(data)
else:
log.info('Added new job %s to scheduler', new_job)
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True):
'''
Enable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then enable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = True
log.info('Enabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_job_complete')
if persist:
self.persist()
def disable_job(self, name, persist=True):
'''
Disable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then disable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = False
log.info('Disabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_job_complete')
if persist:
self.persist()
def modify_job(self, name, schedule, persist=True):
'''
Modify a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then replace it
if name in self.opts['schedule']:
self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
return
self.opts['schedule'][name] = schedule
if persist:
self.persist()
    def run_job(self, name):
        '''
        Run a schedule job now, outside its normal timing rules.

        Each function listed for the job is dispatched individually; an
        unknown function name is logged as an error but does not abort the
        remaining dispatches.
        '''
        data = self._get_schedule().get(name, {})

        # Accept any of the three historical key spellings for the function.
        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]
        for _func in func:
            if _func not in self.functions:
                log.error(
                    'Invalid function: %s in scheduled job %s.',
                    _func, name
                )

            if 'name' not in data:
                data['name'] = name
            log.info('Running Job: %s', name)

            # Grab run, assume True
            run = data.get('run', True)
            if run:
                self._run_job(_func, data)
def enable_schedule(self):
'''
Enable the scheduler.
'''
self.opts['schedule']['enabled'] = True
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
'''
Disable the scheduler.
'''
self.opts['schedule']['enabled'] = False
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
'''
Save the current schedule
'''
self.persist()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True},
tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
'''
Postpone a job in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
new_time = data['new_time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
if 'run_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['run_explicit'] = []
self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
'''
Skip a job at a specific time in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
'''
Return the next fire time for the specified job
'''
schedule = self._get_schedule()
_next_fire_time = None
if schedule:
_next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
if _next_fire_time:
_next_fire_time = _next_fire_time.strftime(fmt)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
    def handle_func(self, multiprocessing_enabled, func, data):
        '''
        Execute a scheduled job function in this (possibly forked) process
        or thread.

        Builds the return payload, writes a proc-file for job tracking,
        invokes the function, dispatches results to any configured
        returners, and always reports back to the master / cleans up in the
        ``finally`` sections. When ``multiprocessing_enabled`` is True the
        process exits at the end.
        '''
        if salt.utils.platform.is_windows() \
                or self.opts.get('transport') == 'zeromq':
            # Since function references can't be pickled and pickling
            # is required when spawning new processes on Windows, regenerate
            # the functions and returners.
            # This is also needed for the ZeroMQ transport to reset all
            # function context data that could keep parent connections:
            # ZeroMQ will hang on polling parent connections from the
            # child process.
            if self.opts['__role'] == 'master':
                self.functions = salt.loader.runner(self.opts, utils=self.utils)
            else:
                self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
            self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
        # Skeleton of the job-return payload sent to returners/master.
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'fun_args': [],
               'schedule': data['name'],
               'jid': salt.utils.jid.gen_jid(self.opts)}

        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
                ret['metadata']['_TOS'] = self.time_offset
                ret['metadata']['_TS'] = time.ctime()
                # NOTE(review): '%H %m' renders hour + *month*; '%M'
                # (minutes) was probably intended — confirm before changing.
                ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
            else:
                log.warning('schedule: The metadata parameter must be '
                            'specified as a dictionary. Ignoring.')

        if multiprocessing_enabled:
            # We just want to modify the process name if we're on a different process
            salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

        data_returner = data.get('returner', None)

        if not self.standalone:
            proc_fn = os.path.join(
                salt.minion.get_proc_dir(self.opts['cachedir']),
                ret['jid']
            )

        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # Reconfigure multiprocessing logging after daemonizing
            log_setup.setup_multiprocessing_logging()

        if multiprocessing_enabled:
            # Daemonize *before* entering the try block so the finally
            # sections cannot execute multiple times (once per fork).
            salt.utils.process.daemonize_if(self.opts)

        # TODO: Make it readable! Split into funcs, remove nested try-except-finally sections.
        try:
            # Minion blackout: refuse to run anything except
            # saltutil.refresh_pillar and whitelisted functions.
            minion_blackout_violation = False
            if self.opts.get('pillar', {}).get('minion_blackout', False):
                whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            elif self.opts.get('grains', {}).get('minion_blackout', False):
                whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            ret['pid'] = os.getpid()

            if not self.standalone:
                if 'jid_include' not in data or data['jid_include']:
                    log.debug(
                        'schedule.handle_func: adding this job to the '
                        'jobcache with data %s', ret
                    )
                    # write this to /var/cache/salt/minion/proc
                    with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                        fp_.write(salt.payload.Serial(self.opts).dumps(ret))

            args = tuple()
            if 'args' in data:
                args = data['args']
                ret['fun_args'].extend(data['args'])

            kwargs = {}
            if 'kwargs' in data:
                kwargs = data['kwargs']
                ret['fun_args'].append(copy.deepcopy(kwargs))

            if func not in self.functions:
                ret['return'] = self.functions.missing_fun_string(func)
                salt.utils.error.raise_error(
                    message=self.functions.missing_fun_string(func))

            # if the func support **kwargs, lets pack in the pub data we have
            # TODO: pack the *same* pub data as a minion?
            argspec = salt.utils.args.get_function_argspec(self.functions[func])
            if argspec.keywords:
                # this function accepts **kwargs, pack in the publish data
                for key, val in six.iteritems(ret):
                    # NOTE(review): `is not` is an identity comparison;
                    # `key != 'kwargs'` is almost certainly what was meant —
                    # it only works here by CPython string-interning accident.
                    if key is not 'kwargs':
                        kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

            # Only include these when running runner modules
            if self.opts['__role'] == 'master':
                jid = salt.utils.jid.gen_jid(self.opts)
                tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

                event = salt.utils.event.get_event(
                    self.opts['__role'],
                    self.opts['sock_dir'],
                    self.opts['transport'],
                    opts=self.opts,
                    listen=False)

                namespaced_event = salt.utils.event.NamespacedEvent(
                    event,
                    tag,
                    print_func=None
                )

                func_globals = {
                    '__jid__': jid,
                    '__user__': salt.utils.user.get_user(),
                    '__tag__': tag,
                    '__jid_event__': weakref.proxy(namespaced_event),
                }
                self_functions = copy.copy(self.functions)
                salt.utils.lazy.verify_fun(self_functions, func)

                # Inject some useful globals to *all* the function's global
                # namespace only once per module-- not per func
                completed_funcs = []

                for mod_name in six.iterkeys(self_functions):
                    if '.' not in mod_name:
                        continue
                    mod, _ = mod_name.split('.', 1)
                    if mod in completed_funcs:
                        continue
                    completed_funcs.append(mod)
                    for global_key, value in six.iteritems(func_globals):
                        self.functions[mod_name].__globals__[global_key] = value

            # Reset retcode before the call so a stale value from a previous
            # job cannot leak into this return.
            self.functions.pack['__context__']['retcode'] = 0

            ret['return'] = self.functions[func](*args, **kwargs)

            if not self.standalone:
                # runners do not provide retcode
                if 'retcode' in self.functions.pack['__context__']:
                    ret['retcode'] = self.functions.pack['__context__']['retcode']

                ret['success'] = True

                if data_returner or self.schedule_returner:
                    if 'return_config' in data:
                        ret['ret_config'] = data['return_config']
                    if 'return_kwargs' in data:
                        ret['ret_kwargs'] = data['return_kwargs']
                    rets = []
                    for returner in [data_returner, self.schedule_returner]:
                        if isinstance(returner, six.string_types):
                            rets.append(returner)
                        elif isinstance(returner, list):
                            rets.extend(returner)
                    # simple de-duplication with order retained
                    for returner in OrderedDict.fromkeys(rets):
                        ret_str = '{0}.returner'.format(returner)
                        if ret_str in self.returners:
                            self.returners[ret_str](ret)
                        else:
                            log.info(
                                'Job %s using invalid returner: %s. Ignoring.',
                                func, returner
                            )
        except Exception:
            log.exception('Unhandled exception running %s', ret['fun'])
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
            if 'return' not in ret:
                ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
            ret['success'] = False
            ret['retcode'] = 254
        finally:
            # Only attempt to return data to the master if the scheduled job is running
            # on a master itself or a minion.
            if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
                # The 'return_job' option is enabled by default even if not set
                if 'return_job' in data and not data['return_job']:
                    pass
                else:
                    # Send back to master so the job is included in the job list
                    mret = ret.copy()
                    # No returners defined, so we're only sending back to the master
                    if not data_returner and not self.schedule_returner:
                        mret['jid'] = 'req'
                        if data.get('return_job') == 'nocache':
                            # overwrite 'req' to signal to master that
                            # this job shouldn't be stored
                            mret['jid'] = 'nocache'
                    load = {'cmd': '_return', 'id': self.opts['id']}
                    for key, value in six.iteritems(mret):
                        load[key] = value

                    if '__role' in self.opts and self.opts['__role'] == 'minion':
                        event = salt.utils.event.get_event('minion',
                                                           opts=self.opts,
                                                           listen=False)
                    elif '__role' in self.opts and self.opts['__role'] == 'master':
                        event = salt.utils.event.get_master_event(self.opts,
                                                                  self.opts['sock_dir'])
                    try:
                        event.fire_event(load, '__schedule_return')
                    except Exception as exc:
                        log.exception('Unhandled exception firing __schedule_return event')

            if not self.standalone:
                log.debug('schedule.handle_func: Removing %s', proc_fn)
                try:
                    os.unlink(proc_fn)
                except OSError as exc:
                    if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                        # EEXIST and ENOENT are OK because the file is gone and that's what
                        # we wanted
                        pass
                    else:
                        log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                        # Otherwise, failing to delete this file is not something
                        # we can cleanly handle.
                        raise
                finally:
                    if multiprocessing_enabled:
                        # Let's make sure we exit the process!
                        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    def eval(self, now=None):
        '''
        Evaluate and execute the schedule.

        :param datetime now: Override the current time with a datetime
            object instance (used by tests and by callers that need a
            deterministic evaluation point).
        '''
        log.trace('==== evaluating schedule now %s =====', now)

        # Normalize the configured loop interval to a timedelta so it can be
        # compared against and added to datetime values below.
        loop_interval = self.opts['loop_interval']
        if not isinstance(loop_interval, datetime.timedelta):
            loop_interval = datetime.timedelta(seconds=loop_interval)
        def _splay(splaytime):
            '''
            Calculate a random splay offset in seconds.

            ``splaytime`` may be an int (upper bound, range starts at 1) or a
            dict with ``start``/``end`` keys bounding the random range.
            Returns None when the dict form is invalid (end < start).
            '''
            splay_ = None
            if isinstance(splaytime, dict):
                if splaytime['end'] >= splaytime['start']:
                    splay_ = random.randint(splaytime['start'],
                                            splaytime['end'])
                else:
                    log.error('schedule.handle_func: Invalid Splay, '
                              'end must be larger than start. Ignoring splay.')
            else:
                splay_ = random.randint(1, splaytime)
            return splay_
        def _handle_time_elements(data):
            '''
            Handle a schedule item defined with time elements
            (seconds, minutes, hours, days).

            Collapses the elements into a single interval in seconds
            (cached in data['_seconds']) and computes the next fire time.
            '''
            if '_seconds' not in data:
                interval = int(data.get('seconds', 0))
                interval += int(data.get('minutes', 0)) * 60
                interval += int(data.get('hours', 0)) * 3600
                interval += int(data.get('days', 0)) * 86400

                data['_seconds'] = interval

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

                # Shrink the scheduler's polling interval so a short job
                # interval is not overshot by a longer loop_interval.
                if interval < self.loop_interval:
                    self.loop_interval = interval
            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        def _handle_once(data, loop_interval):
            '''
            Handle a schedule item with ``once``: a single run at a fixed
            time, parsed via ``once_fmt`` (default ISO-8601-like) when given
            as a string.  Sets data['_continue'] when the run is in the past
            or not yet due.
            '''
            if data['_next_fire_time']:
                # NOTE: Python precedence makes this A or (B and C) -- i.e.
                # already missed, OR in the future without a splay pending.
                if data['_next_fire_time'] < now - loop_interval or \
                        data['_next_fire_time'] > now and \
                        not data['_splay']:
                    data['_continue'] = True

            if not data['_next_fire_time'] and \
                    not data['_splay']:
                once = data['once']
                if not isinstance(once, datetime.datetime):
                    once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                    try:
                        once = datetime.datetime.strptime(data['once'],
                                                          once_fmt)
                    except (TypeError, ValueError):
                        data['_error'] = ('Date string could not '
                                          'be parsed: {0}, {1}. '
                                          'Ignoring job {2}.'.format(
                                              data['once'],
                                              once_fmt,
                                              data['name']))
                        log.error(data['_error'])
                        return
                data['_next_fire_time'] = once
                data['_next_scheduled_fire_time'] = once
                # If _next_fire_time is less than now, continue
                if once < now - loop_interval:
                    data['_continue'] = True
        def _handle_when(data, loop_interval):
            '''
            Handle a schedule item with ``when``: one or more absolute run
            times, optionally resolved indirectly through pillar/grains
            "whens" lookup tables.  Requires python-dateutil for string
            parsing.  Sets data['_next_fire_time'], data['_run'], and the
            data['_continue']/data['_error'] control flags.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['when'], list):
                _when_data = [data['when']]
            else:
                _when_data = data['when']

            _when = []
            for i in _when_data:
                # Each entry may be a key into pillar['whens'] or
                # grains['whens'], or a literal datetime / date string.
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        i in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'],
                                      dict):
                        data['_error'] = ('Pillar item "whens" '
                                          'must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['pillar']['whens'][i]
                elif ('whens' in self.opts['grains'] and
                        i in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'],
                                      dict):
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['grains']['whens'][i]
                else:
                    when_ = i

                if not isinstance(when_, datetime.datetime):
                    try:
                        when_ = dateutil_parser.parse(when_)
                    except ValueError:
                        data['_error'] = ('Invalid date string {0}. '
                                          'Ignoring job {1}.'.format(i, data['name']))
                        log.error(data['_error'])
                        return

                _when.append(when_)

            if data['_splay']:
                _when.append(data['_splay'])

            # Sort the list of "whens" from earlier to later schedules
            _when.sort()

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_when):
                if len(_when) > 1:
                    if i < now - loop_interval:
                        # Remove all missed schedules except the latest one.
                        # We need it to detect if it was triggered previously.
                        _when.remove(i)

            if _when:
                # Grab the first element, which is the next run time or
                # last scheduled time in the past.
                when = _when[0]

                if when < now - loop_interval and \
                        not data.get('_run', False) and \
                        not data.get('run', False) and \
                        not data['_splay']:
                    data['_next_fire_time'] = None
                    data['_continue'] = True
                    return

                if '_run' not in data:
                    # Prevent run of jobs from the past
                    data['_run'] = bool(when >= now - loop_interval)

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = when
                    data['_next_scheduled_fire_time'] = when

                # NOTE: ``run`` here is the flag from the enclosing eval()
                # scope (set earlier by run_explicit handling).
                if data['_next_fire_time'] < when and \
                        not run and \
                        not data['_run']:
                    data['_next_fire_time'] = when
                    data['_run'] = True

            elif not data.get('_run', False):
                data['_next_fire_time'] = None
                data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
        def _handle_run_explicit(data, loop_interval):
            '''
            Handle a schedule item with ``run_explicit``: a list of explicit
            run times (datetime objects, or dicts with 'time'/'time_fmt').
            Sets data['run'] True only when the earliest surviving time
            falls inside the current loop window.
            '''
            _run_explicit = []
            for _run_time in data['run_explicit']:
                if isinstance(_run_time, datetime.datetime):
                    _run_explicit.append(_run_time)
                else:
                    _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                    _run_time['time_fmt']))

            data['run'] = False

            # Copy the list so we can loop through it
            # (drop already-missed entries, but keep at least one).
            for i in copy.deepcopy(_run_explicit):
                if len(_run_explicit) > 1:
                    if i < now - loop_interval:
                        _run_explicit.remove(i)

            if _run_explicit:
                if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                    data['run'] = True
                    data['_next_fire_time'] = _run_explicit[0]
        def _handle_skip_explicit(data, loop_interval):
            '''
            Handle a schedule item with ``skip_explicit``: a list of times
            (datetime objects, or dicts with 'time'/'time_fmt') at which the
            job must be skipped.  When a skip time matches the current loop
            window, either substitute self.skip_function or mark the job
            skipped; otherwise allow the run.
            '''
            data['run'] = False

            _skip_explicit = []
            for _skip_time in data['skip_explicit']:
                if isinstance(_skip_time, datetime.datetime):
                    _skip_explicit.append(_skip_time)
                else:
                    _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                     _skip_time['time_fmt']))

            # Copy the list so we can loop through it
            # (discard skip times that are already in the past).
            for i in copy.deepcopy(_skip_explicit):
                if i < now - loop_interval:
                    _skip_explicit.remove(i)

            if _skip_explicit:
                if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                    if self.skip_function:
                        # Run the configured replacement function instead.
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'skip_explicit'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle a schedule item with ``skip_during_range``: a dict with
            'start' and 'end' bounds (datetime objects or parseable strings)
            during which the job is skipped (or replaced by
            self.skip_function).  Optionally schedules an immediate run
            right after the range via run_after_skip_range.
            Requires python-dateutil for string parsing.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
                    data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    if self.skip_function:
                        # Run the configured replacement function instead.
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_range(data):
            '''
            Handle a schedule item with ``range``: a dict with 'start' and
            'end' bounds inside (or, with 'invert', outside) of which the
            job may run.  Requires python-dateutil for string parsing.
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only OUTSIDE [start, end].
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        if self.skip_function:
                            # Run the configured replacement function instead.
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
        def _handle_after(data):
            '''
            Handle a schedule item with ``after``: the job may only run once
            the given time has passed.  Requires python-dateutil for string
            parsing.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            after = data['after']
            if not isinstance(after, datetime.datetime):
                after = dateutil_parser.parse(after)

            if after >= now:
                log.debug(
                    'After time has not passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'after_not_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
        def _handle_until(data):
            '''
            Handle a schedule item with ``until``: the job may only run
            while the given time has not yet passed.  Requires
            python-dateutil for string parsing.
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            until = data['until']
            if not isinstance(until, datetime.datetime):
                until = dateutil_parser.parse(until)

            if until <= now:
                log.debug(
                    'Until time has passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'until_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
        # Main evaluation pass: merge opts+pillar schedule, apply global
        # settings, then evaluate each job with the helpers defined above.
        schedule = self._get_schedule()
        if not isinstance(schedule, dict):
            raise ValueError('Schedule must be of type dict.')

        # Global (non-job) settings override the instance defaults.
        if 'skip_function' in schedule:
            self.skip_function = schedule['skip_function']
        if 'skip_during_range' in schedule:
            self.skip_during_range = schedule['skip_during_range']
        if 'enabled' in schedule:
            self.enabled = schedule['enabled']
        if 'splay' in schedule:
            self.splay = schedule['splay']

        _hidden = ['enabled',
                   'skip_function',
                   'skip_during_range',
                   'splay']
        for job, data in six.iteritems(schedule):
            # Skip anything that is a global setting
            if job in _hidden:
                continue

            # Clear these out between runs
            for item in ['_continue',
                         '_error',
                         '_enabled',
                         '_skipped',
                         '_skip_reason',
                         '_skipped_time']:
                if item in data:
                    del data[item]
            run = False

            if 'name' in data:
                job_name = data['name']
            else:
                job_name = data['name'] = job

            if not isinstance(data, dict):
                log.error(
                    'Scheduled job "%s" should have a dict value, not %s',
                    job_name, type(data)
                )
                continue
            # Accept any of the three aliases for the function to run.
            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if not isinstance(func, list):
                func = [func]
            for _func in func:
                if _func not in self.functions:
                    log.info(
                        'Invalid function: %s in scheduled job %s.',
                        _func, job_name
                    )

            if '_next_fire_time' not in data:
                data['_next_fire_time'] = None

            if '_splay' not in data:
                data['_splay'] = None

            if 'run_on_start' in data and \
                    data['run_on_start'] and \
                    '_run_on_start' not in data:
                data['_run_on_start'] = True

            if not now:
                now = datetime.datetime.now()

            # Used for quick lookups when detecting invalid option
            # combinations.
            schedule_keys = set(data.keys())

            time_elements = ('seconds', 'minutes', 'hours', 'days')
            scheduling_elements = ('when', 'cron', 'once')

            invalid_sched_combos = [
                set(i) for i in itertools.combinations(scheduling_elements, 2)
            ]

            if any(i <= schedule_keys for i in invalid_sched_combos):
                log.error(
                    'Unable to use "%s" options together. Ignoring.',
                    '", "'.join(scheduling_elements)
                )
                continue

            invalid_time_combos = []
            for item in scheduling_elements:
                all_items = itertools.chain([item], time_elements)
                invalid_time_combos.append(
                    set(itertools.combinations(all_items, 2)))

            if any(set(x) <= schedule_keys for x in invalid_time_combos):
                log.error(
                    'Unable to use "%s" with "%s" options. Ignoring',
                    '", "'.join(time_elements),
                    '", "'.join(scheduling_elements)
                )
                continue

            if 'run_explicit' in data:
                _handle_run_explicit(data, loop_interval)
                run = data['run']

            # Exactly one scheduling style is dispatched per job.
            if True in [True for item in time_elements if item in data]:
                _handle_time_elements(data)
            elif 'once' in data:
                _handle_once(data, loop_interval)
            elif 'when' in data:
                _handle_when(data, loop_interval)
            elif 'cron' in data:
                _handle_cron(data, loop_interval)
            else:
                continue

            # Something told us to continue, so we continue
            if '_continue' in data and data['_continue']:
                continue

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            # Whole-second distance to the next fire time (negative when
            # the fire time was missed).
            seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

            # If there is no job specific splay available,
            # grab the global which defaults to None.
            if 'splay' not in data:
                data['splay'] = self.splay

            if 'splay' in data and data['splay']:
                # Got "splay" configured, make decision to run a job based on that
                if not data['_splay']:
                    # Try to add "splay" time only if next job fire time is
                    # still in the future. We should trigger job run
                    # immediately otherwise.
                    splay = _splay(data['splay'])
                    if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                        log.debug('schedule.handle_func: Adding splay of '
                                  '%s seconds to next run.', splay)
                        data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                        if 'when' in data:
                            data['_run'] = True
                    else:
                        run = True

                if data['_splay']:
                    # The "splay" configuration has been already processed, just use it
                    seconds = (data['_splay'] - now).total_seconds()
                    if 'when' in data:
                        data['_next_fire_time'] = data['_splay']

            # Decide whether the job is due, per scheduling style.
            if '_seconds' in data:
                if seconds <= 0:
                    run = True
            elif 'when' in data and data['_run']:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    data['_run'] = False
                    run = True
            elif 'cron' in data:
                # Reset next scheduled time because it is in the past now,
                # and we should trigger the job run, then wait for the next one.
                if seconds <= 0:
                    data['_next_fire_time'] = None
                    run = True
            elif 'once' in data:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    run = True
            elif seconds == 0:
                run = True

            if '_run_on_start' in data and data['_run_on_start']:
                run = True
                data['_run_on_start'] = False
            elif run:
                # The job is due; apply the filters that may veto the run
                # or substitute the skip function.
                if 'range' in data:
                    _handle_range(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                # If there is no job specific skip_during_range available,
                # grab the global which defaults to None.
                if 'skip_during_range' not in data and self.skip_during_range:
                    data['skip_during_range'] = self.skip_during_range

                if 'skip_during_range' in data and data['skip_during_range']:
                    _handle_skip_during_range(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'skip_explicit' in data:
                    _handle_skip_explicit(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'until' in data:
                    _handle_until(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                if 'after' in data:
                    _handle_after(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

            # If args is a list and less than the number of functions
            # run is set to False.
            if 'args' in data and isinstance(data['args'], list):
                if len(data['args']) < len(func):
                    data['_error'] = ('Number of arguments is less than '
                                      'the number of functions. Ignoring job.')
                    log.error(data['_error'])
                    run = False

            # If the job item has continue, then we set run to False
            # so the job does not run but we still get the important
            # information calculated, eg. _next_fire_time
            if '_continue' in data and data['_continue']:
                run = False

            # If there is no job specific enabled available,
            # grab the global which defaults to True.
            if 'enabled' not in data:
                data['enabled'] = self.enabled

            # If globally disabled, disable the job
            if not self.enabled:
                data['enabled'] = self.enabled
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            # Job is disabled, set run to False
            if 'enabled' in data and not data['enabled']:
                data['_enabled'] = False
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            miss_msg = ''
            if seconds < 0:
                miss_msg = ' (runtime missed ' \
                           'by {0} seconds)'.format(abs(seconds))

            try:
                if run:
                    # Job is disabled, continue
                    if 'enabled' in data and not data['enabled']:
                        log.debug('Job: %s is disabled', job_name)
                        data['_skip_reason'] = 'disabled'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        continue

                    if 'jid_include' not in data or data['jid_include']:
                        data['jid_include'] = True
                        log.debug('schedule: Job %s was scheduled with jid_include, '
                                  'adding to cache (jid_include defaults to True)',
                                  job_name)
                        if 'maxrunning' in data:
                            log.debug('schedule: Job %s was scheduled with a max '
                                      'number of %s', job_name, data['maxrunning'])
                        else:
                            log.info('schedule: maxrunning parameter was not specified for '
                                     'job %s, defaulting to 1.', job_name)
                            data['maxrunning'] = 1

                    if not self.standalone:
                        data['run'] = run
                        data = self._check_max_running(func,
                                                       data,
                                                       self.opts,
                                                       now)
                        run = data['run']

                    # Check run again, just in case _check_max_running
                    # set run to False
                    if run:
                        log.info('Running scheduled job: %s%s', job_name, miss_msg)
                        self._run_job(func, data)
            finally:
                # Only set _last_run if the job ran
                if run:
                    data['_last_run'] = now
                data['_splay'] = None

            # Advance the next fire time for interval-style jobs.
            if '_seconds' in data:
                if self.standalone:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif '_skipped' in data and data['_skipped']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif run:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Launch a scheduled job.

        func -- a list of function names to run (one process/thread each);
                when 'args' is a list, args[i] is handed to func[i].
        data -- the schedule item dict for the job.
        '''
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            for i, _func in enumerate(func):
                _data = copy.deepcopy(data)
                if 'args' in _data and isinstance(_data['args'], list):
                    # Pair each function with its own argument entry.
                    _data['args'] = _data['args'][i]

                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.reset
|
python
|
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
|
Reset the scheduler to defaults
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L306-L314
| null |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule (singleton), unless
        ``new_instance`` is True, in which case a fresh, non-cached
        instance is returned.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            # Real initialization happens here, NOT in __init__ (which is a
            # no-op so repeated construction doesn't re-init the singleton).
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        '''
        Intentionally a no-op: Python calls __init__ on every construction,
        which would re-initialize the cached singleton.  All real setup is
        done once in __singleton_init__ (invoked from __new__).
        '''
        pass
# an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        One-time initializer called from __new__ (see __init__ for why this
        is not done in __init__).  Sets up option/function references,
        returners, interval tracking, and optionally cleans up the proc dir
        and deletes jobs matching the ``cleanup`` prefixes.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept either a ready dict-like of returners or a loader
            # object we can pull the functions from.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
            self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
            self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
def __getnewargs__(self):
return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
    def _get_schedule(self,
                      include_opts=True,
                      include_pillar=True,
                      remove_hidden=False):
        '''
        Return the merged schedule data structure.

        include_opts   -- merge in self.opts['schedule'] (opts wins on
                          key conflicts since it is applied last)
        include_pillar -- merge in pillar['schedule']
        remove_hidden  -- strip internal bookkeeping keys (those starting
                          with '_') from each job entry
        '''
        schedule = {}
        if include_pillar:
            pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
            if not isinstance(pillar_schedule, dict):
                raise ValueError('Schedule must be of type dict.')
            schedule.update(pillar_schedule)
        if include_opts:
            opts_schedule = self.opts.get('schedule', {})
            if not isinstance(opts_schedule, dict):
                raise ValueError('Schedule must be of type dict.')
            schedule.update(opts_schedule)

        if remove_hidden:
            # Iterate over a deep copy so we can delete from the live dict
            # while looping.
            _schedule = copy.deepcopy(schedule)
            for job in _schedule:
                if isinstance(_schedule[job], dict):
                    for item in _schedule[job]:
                        if item.startswith('_'):
                            del schedule[job][item]
        return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce the job's ``maxrunning`` limit.

        Counts currently-running jobs with this schedule name; when the
        count reaches data['maxrunning'], marks the job skipped
        (data['run'] = False).  Returns the (possibly updated) data dict.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True

        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # Only count entries whose process is actually alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
        (opts-only schedule, with internal '_'-prefixed keys stripped).
        Write failures are logged, not raised.
        '''
        # Work out the configuration directory: explicit conf_dir, the
        # directory of conf_file, or the compiled-in default -- in that order.
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR

        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))

        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)

        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        # Pillar jobs are excluded: they are not ours to persist.
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
    def delete_job(self, name, persist=True):
        '''
        Delete a job from the scheduler.  Jobs defined in pillar cannot be
        deleted and only produce a warning.

        name    -- schedule item to delete
        persist -- when True (default), write the change to _schedule.conf
        '''
        # ensure job exists, then delete it
        if name in self.opts['schedule']:
            del self.opts['schedule'][name]
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot delete job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_delete_complete')

        # remove from self.intervals
        if name in self.intervals:
            del self.intervals[name]

        if persist:
            self.persist()
    def delete_job_prefix(self, name, persist=True):
        '''
        Delete every job whose name starts with ``name``.  Matching jobs
        defined in pillar cannot be deleted and only produce warnings.

        persist -- when True (default), write the change to _schedule.conf
        '''
        # ensure job exists, then delete it
        # (list() so we can delete from the dict while iterating)
        for job in list(self.opts['schedule'].keys()):
            if job.startswith(name):
                del self.opts['schedule'][job]
        for job in self._get_schedule(include_opts=False):
            if job.startswith(name):
                log.warning("Cannot delete job %s, it's in the pillar!", job)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_delete_complete')

        # remove from self.intervals
        for job in list(self.intervals.keys()):
            if job.startswith(name):
                del self.intervals[job]

        if persist:
            self.persist()
    def add_job(self, data, persist=True):
        '''
        Adds a new job to the scheduler. The format is the same as required in
        the configuration file. See the docs on how YAML is interpreted into
        python data-structures to make sure, you pass correct dictionaries.

        data    -- a single-entry dict {job_name: job_settings}
        persist -- when True (default), write the change to _schedule.conf
        '''
        # we don't do any checking here besides making sure its a dict.
        # eval() already does for us and raises errors accordingly
        if not isinstance(data, dict):
            raise ValueError('Scheduled jobs have to be of type dict.')
        if not len(data) == 1:
            raise ValueError('You can only schedule one new job at a time.')

        # if enabled is not included in the job,
        # assume job is enabled.
        for job in data:
            if 'enabled' not in data[job]:
                data[job]['enabled'] = True

        new_job = next(six.iterkeys(data))

        if new_job in self._get_schedule(include_opts=False):
            log.warning("Cannot update job %s, it's in the pillar!", new_job)

        elif new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
            self.opts['schedule'].update(data)

        else:
            log.info('Added new job %s to scheduler', new_job)
            self.opts['schedule'].update(data)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_add_complete')

        if persist:
            self.persist()
    def enable_job(self, name, persist=True):
        '''
        Enable a job in the scheduler.  Jobs defined in pillar cannot be
        modified and only produce a warning.

        name    -- schedule item to enable
        persist -- when True (default), write the change to _schedule.conf
        '''
        # ensure job exists, then enable it
        if name in self.opts['schedule']:
            self.opts['schedule'][name]['enabled'] = True
            log.info('Enabling job %s in scheduler', name)
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_enabled_job_complete')

        if persist:
            self.persist()
    def disable_job(self, name, persist=True):
        '''
        Disable a job in the scheduler.  Jobs defined in pillar cannot be
        modified and only produce a warning.

        name    -- schedule item to disable
        persist -- when True (default), write the change to _schedule.conf
        '''
        # ensure job exists, then disable it
        if name in self.opts['schedule']:
            self.opts['schedule'][name]['enabled'] = False
            log.info('Disabling job %s in scheduler', name)
        elif name in self._get_schedule(include_opts=False):
            log.warning("Cannot modify job %s, it's in the pillar!", name)

        # Fire the complete event back along with updated list of schedule
        evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
        evt.fire_event({'complete': True,
                        'schedule': self._get_schedule()},
                       tag='/salt/minion/minion_schedule_disabled_job_complete')

        if persist:
            self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Modify a job in the scheduler. Ignores jobs from pillar
    '''
    in_opts = name in self.opts['schedule']
    if not in_opts and name in self._get_schedule(include_opts=False):
        # Pillar-sourced jobs are read-only from here.
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    if in_opts:
        # Remove the existing definition so the new one replaces it.
        self.delete_job(name, persist)

    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now
    '''
    data = self._get_schedule().get(name, {})

    # The callable may be supplied under any of three aliases;
    # first one present wins.
    func = None
    for alias in ('function', 'func', 'fun'):
        if alias in data:
            func = data[alias]
            break
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        data.setdefault('name', name)
        log.info('Running Job: %s', name)

        # 'run' defaults to True when absent
        if data.get('run', True):
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Enable the scheduler.
    '''
    self.opts['schedule']['enabled'] = True

    # Broadcast the updated schedule so listeners know the change landed
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Disable the scheduler.
    '''
    self.opts['schedule']['enabled'] = False

    # Broadcast the updated schedule so listeners know the change landed
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Reload the schedule from saved schedule file.
    '''
    # Drop all interval bookkeeping; it is rebuilt on the next eval pass.
    self.intervals = {}

    # Accept either the bare schedule dict or one nested under 'schedule'.
    schedule = schedule.get('schedule', schedule)
    self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    List the current schedule items
    '''
    # Map the requested source onto _get_schedule() keyword filters;
    # anything else yields the merged (opts + pillar) schedule.
    source_filters = {'pillar': {'include_opts': False},
                      'opts': {'include_pillar': False}}
    schedule = self._get_schedule(**source_filters.get(where, {}))

    # Fire the complete event back along with the list of schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': schedule},
                         tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Save the current schedule
    '''
    self.persist()

    # Announce that the schedule has been written out
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True},
                         tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler.
    Ignores jobs from pillar
    '''
    job_time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Suppress the originally scheduled occurrence...
        job.setdefault('skip_explicit', []).append(
            {'time': job_time, 'time_fmt': time_fmt})
        # ...and request an explicit run at the new time instead.
        job.setdefault('run_explicit', []).append(
            {'time': new_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire a completion event carrying the refreshed schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip a job at a specific time in the scheduler.
    Ignores jobs from pillar
    '''
    job_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        # Record the occurrence that should be skipped.
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': job_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire a completion event carrying the refreshed schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Return the next fire time for the specified job
    '''
    fire_time = None
    schedule = self._get_schedule()
    if schedule:
        fire_time = schedule.get(name, {}).get('_next_fire_time', None)
    if fire_time:
        # Render the datetime as a string for the event payload
        fire_time = fire_time.strftime(fmt)

    # Fire the complete event back along with the next fire time
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'next_fire_time': fire_time},
        tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the specified schedule item

    Returns an empty dict when the job is not in the schedule.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled job in this thread or in a freshly spawned process.

    :param bool multiprocessing_enabled: True when the job runs in its own
        process; the process title is updated and the process exits once
        the job finishes.
    :param str func: Name of the execution/runner function to invoke.
    :param dict data: The schedule item definition (args, kwargs, returner,
        metadata, jid_include, return_job, ...).
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Daemonize *before* entering the try block so the finally
        # section cannot be executed multiple times.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # BUGFIX: was ``key is not 'kwargs'`` -- identity comparison
                # against a string literal, which relies on CPython interning
                # and raises SyntaxWarning on 3.8+; use equality instead.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)

            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)

            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Spawn the execution of one scheduled job item via ``self.handle_func``.

    :param func: function name, or list of function names, to execute
    :param data: the schedule item's data dict (args, kwargs, dry_run, ...)

    Honors the item's ``dry_run`` flag, the global ``multiprocessing`` opt
    (process vs. thread) and ``run_schedule_jobs_in_background`` (inline
    execution in the current thread).
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return
    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}
    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread
        # ``func`` may be a list of functions; each entry gets its own
        # worker, paired positionally with an entry of data['args'].
        for i, _func in enumerate(func):
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.delete_job_prefix
|
python
|
def delete_job_prefix(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then delete it
for job in list(self.opts['schedule'].keys()):
if job.startswith(name):
del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
if job.startswith(name):
log.warning("Cannot delete job %s, it's in the pillar!", job)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
for job in list(self.intervals.keys()):
if job.startswith(name):
del self.intervals[job]
if persist:
self.persist()
|
Deletes a job from the scheduler. Ignores jobs from pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L316-L340
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n",
"def persist(self):\n '''\n Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf\n '''\n config_dir = self.opts.get('conf_dir', None)\n if config_dir is None and 'conf_file' in self.opts:\n config_dir = os.path.dirname(self.opts['conf_file'])\n if config_dir is None:\n config_dir = salt.syspaths.CONFIG_DIR\n\n minion_d_dir = os.path.join(\n config_dir,\n os.path.dirname(self.opts.get('default_include',\n salt.config.DEFAULT_MINION_OPTS['default_include'])))\n\n if not os.path.isdir(minion_d_dir):\n os.makedirs(minion_d_dir)\n\n schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')\n log.debug('Persisting schedule')\n schedule_data = self._get_schedule(include_pillar=False,\n remove_hidden=True)\n try:\n with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:\n fp_.write(\n salt.utils.stringutils.to_bytes(\n salt.utils.yaml.safe_dump(\n {'schedule': schedule_data}\n )\n )\n )\n except (IOError, OSError):\n log.error('Failed to persist the updated schedule',\n exc_info_on_loglevel=logging.DEBUG)\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule (singleton pattern).

    When ``new_instance=True`` a fresh instance is returned without
    replacing the cached singleton; otherwise the cached instance is
    created on first use and re-used thereafter.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Untracked instance: do not overwrite the cached singleton.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally a no-op: __new__ may return the cached singleton, and
    # all real initialization happens exactly once in __singleton_init__.
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initialization of the Schedule singleton (called by __new__).

    :param opts:       minion/master opts dict
    :param functions:  loaded execution (or runner) functions
    :param returners:  loaded returner functions, or a loader object that
                       can generate them
    :param intervals:  pre-existing interval bookkeeping dict to adopt
    :param cleanup:    iterable of job-name prefixes to delete at startup
    :param proxy:      proxy-minion object, if any
    :param standalone: when True, skip returner/proc-dir handling
    :param utils:      loaded utility modules (loaded on demand if None)
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Global runtime toggles, adjustable later via scheduler events.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            # presumably a loader object was passed in -- generate the
            # returner functions from it (TODO confirm against callers)
            self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments handed to __new__ when unpickling; cleanup is dropped
    # (None) so no jobs are deleted on reconstruction.
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts currently-running jobs carrying the same schedule name and,
    when the limit is reached, marks the job skipped by setting
    ``data['run'] = False``. Returns the (possibly updated) ``data``.

    :param func: function name, used for logging only
    :param data: the schedule item's data dict
    :param opts: opts dict (NOTE(review): unused here; ``self.opts`` is
                 consulted instead -- confirm whether callers rely on it)
    :param now:  current datetime, recorded as the skip time
    '''
    # Check to see if there are other jobs with this
    # signature running. If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

    Only locally-configured jobs are written out (pillar jobs and private
    ``_``-prefixed bookkeeping keys are excluded). I/O failures are
    logged, not raised.
    '''
    # Resolve the configuration directory: explicit conf_dir, the
    # directory containing conf_file, or the packaged default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR
    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))
    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)
    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove the named job from the scheduler.

    Jobs defined in pillar cannot be deleted; a warning is logged
    instead. A completion event carrying the updated schedule is always
    fired, and the schedule is optionally persisted to disk.
    '''
    schedule = self.opts['schedule']
    if name in schedule:
        del schedule[name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)
    # Notify listeners that the delete request completed, including the
    # resulting schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # Drop any interval bookkeeping tied to this job.
    self.intervals.pop(name, None)
    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def add_job(self, data, persist=True):
    '''
    Add a new job to the scheduler.

    ``data`` must be a dict holding exactly one ``{job_name: job_spec}``
    entry, in the same format as the configuration file. Fires an
    add-complete event with the updated schedule and optionally persists
    it.

    :raises ValueError: when ``data`` is not a dict or contains more
                        than one job
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')
    # A job with no explicit 'enabled' flag is assumed enabled.
    for job in data:
        data[job].setdefault('enabled', True)
    new_job = next(six.iterkeys(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    else:
        if new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
        else:
            log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Enable the named job. Jobs from pillar cannot be modified; a warning
    is logged instead. Fires an enabled-job-complete event and optionally
    persists the schedule.
    '''
    schedule = self.opts['schedule']
    if name in schedule:
        schedule[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Report completion together with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'schedule': self._get_schedule(), 'complete': True},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Disable the named job. Jobs from pillar cannot be modified; a warning
    is logged instead. Fires a disabled-job-complete event and optionally
    persists the schedule.
    '''
    schedule = self.opts['schedule']
    if name in schedule:
        schedule[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Report completion together with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'schedule': self._get_schedule(), 'complete': True},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the named job's definition with ``schedule``. Jobs from
    pillar cannot be modified; a warning is logged and nothing changes.
    '''
    # Guard: pillar-only jobs are immutable.
    if name not in self.opts['schedule'] and \
            name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    if name in self.opts['schedule']:
        # Drop the old definition first (this also fires the delete
        # event and prunes the job's interval bookkeeping).
        self.delete_job(name, persist)
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def run_job(self, name):
    '''
    Trigger the named schedule job immediately.

    The job's function may be stored under ``function``, ``func`` or
    ``fun``; a list of functions runs each one in turn.
    '''
    data = self._get_schedule().get(name, {})
    # The function name can live under several keys; first match wins.
    func = None
    for key in ('function', 'func', 'fun'):
        if key in data:
            func = data[key]
            break
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )
        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)
        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and fire an enabled-complete event
    carrying the current schedule.
    '''
    self.opts['schedule']['enabled'] = True
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'schedule': self._get_schedule(), 'complete': True},
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and fire a disabled-complete event
    carrying the current schedule.
    '''
    self.opts['schedule']['enabled'] = False
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'schedule': self._get_schedule(), 'complete': True},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Fire an event listing the current schedule items.

    :param where: ``'pillar'`` for pillar-only jobs, ``'opts'`` for
                  config-only jobs, anything else for the merged view
    '''
    selectors = {
        'pillar': {'include_opts': False},
        'opts': {'include_pillar': False},
    }
    schedule = self._get_schedule(**selectors.get(where, {}))
    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Write the current schedule to disk and fire a saved event.
    '''
    self.persist()
    # Acknowledge completion to any listeners.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job: skip its run at ``data['time']`` and run it instead
    at ``data['new_time']``. Jobs from pillar cannot be modified.
    Fires a postpone-complete event with the updated schedule.
    '''
    orig_time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Record both halves of the postponement on the job itself.
        job.setdefault('skip_explicit', []).append(
            {'time': orig_time, 'time_fmt': time_fmt})
        job.setdefault('run_explicit', []).append(
            {'time': new_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Report completion together with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Mark the named job to be skipped at ``data['time']``. Jobs from
    pillar cannot be modified. Fires a skip-job-complete event with the
    updated schedule.
    '''
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Report completion together with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Fire an event carrying the next fire time of the named job,
    formatted with ``fmt`` (None when the job has no recorded
    ``_next_fire_time``).
    '''
    schedule = self._get_schedule()
    fire_time = None
    if schedule:
        fire_time = schedule.get(name, {}).get('_next_fire_time', None)
        if fire_time:
            fire_time = fire_time.strftime(fmt)
    # Report the result on the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a single scheduled function call, in the current thread or in
    a freshly spawned process.

    :param multiprocessing_enabled: True when running inside a spawned
        process; the process exits via sys.exit() in the final cleanup.
    :param func: name of the function to execute
    :param data: the schedule item's data dict (args, kwargs, returner,
        metadata, return_job, ...)

    Builds the return payload, honors minion blackout, records the job in
    the proc dir, invokes the function, dispatches results to any
    configured returners and fires the result back to the master.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep parents connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Daemonize *before* entering the try block so a fork inside it
        # cannot execute the finally section multiple times.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Split to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # BUG FIX: was ``key is not 'kwargs'`` -- an identity
                # comparison against a str literal (SyntaxWarning on
                # CPython >= 3.8); inequality is the intended check.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
    '''
    Evaluate and execute the schedule.

    :param datetime now: Override the current time with a datetime object
        instance; when None, ``datetime.datetime.now()`` is used inside the
        job loop.
    '''
    log.trace('==== evaluating schedule now %s =====', now)
    loop_interval = self.opts['loop_interval']
    # Normalize the configured loop interval to a timedelta so all time
    # arithmetic below can use datetime math directly.
    if not isinstance(loop_interval, datetime.timedelta):
        loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
    def _handle_time_elements(data):
        '''
        Handle schedule item with time elements
        seconds, minutes, hours, days
        '''
        # Compute the total interval once and cache it under '_seconds';
        # subsequent evaluations of this job skip the arithmetic.
        if '_seconds' not in data:
            interval = int(data.get('seconds', 0))
            interval += int(data.get('minutes', 0)) * 60
            interval += int(data.get('hours', 0)) * 3600
            interval += int(data.get('days', 0)) * 86400
            data['_seconds'] = interval
            if not data['_next_fire_time']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            # Make sure the scheduler loop wakes up often enough to honor
            # this job's interval.
            if interval < self.loop_interval:
                self.loop_interval = interval
        data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _handle_once(data, loop_interval):
        '''
        Handle schedule item with once
        '''
        if data['_next_fire_time']:
            # NOTE(review): operator precedence makes this read as
            # A or (B and C) -- confirm that grouping is intended.
            if data['_next_fire_time'] < now - loop_interval or \
                    data['_next_fire_time'] > now and \
                    not data['_splay']:
                data['_continue'] = True
        if not data['_next_fire_time'] and \
                not data['_splay']:
            once = data['once']
            # Accept either a datetime or a string parsed with 'once_fmt'
            # (default ISO-8601 without timezone).
            if not isinstance(once, datetime.datetime):
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'],
                                                      once_fmt)
                except (TypeError, ValueError):
                    data['_error'] = ('Date string could not '
                                      'be parsed: {0}, {1}. '
                                      'Ignoring job {2}.'.format(
                                          data['once'],
                                          once_fmt,
                                          data['name']))
                    log.error(data['_error'])
                    return
            data['_next_fire_time'] = once
            data['_next_scheduled_fire_time'] = once
            # If _next_fire_time is less than now, continue
            if once < now - loop_interval:
                data['_continue'] = True
    def _handle_when(data, loop_interval):
        '''
        Handle schedule item with when
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
        # Normalize 'when' to a list of entries.
        if not isinstance(data['when'], list):
            _when_data = [data['when']]
        else:
            _when_data = data['when']
        _when = []
        for i in _when_data:
            # Each entry may be an alias into pillar['whens'] or
            # grains['whens'], a datetime, or a parseable date string.
            if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                    i in self.opts['pillar']['whens']):
                if not isinstance(self.opts['pillar']['whens'],
                                  dict):
                    data['_error'] = ('Pillar item "whens" '
                                      'must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['pillar']['whens'][i]
            elif ('whens' in self.opts['grains'] and
                  i in self.opts['grains']['whens']):
                if not isinstance(self.opts['grains']['whens'],
                                  dict):
                    data['_error'] = ('Grain "whens" must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['grains']['whens'][i]
            else:
                when_ = i
            if not isinstance(when_, datetime.datetime):
                try:
                    when_ = dateutil_parser.parse(when_)
                except ValueError:
                    data['_error'] = ('Invalid date string {0}. '
                                      'Ignoring job {1}.'.format(i, data['name']))
                    log.error(data['_error'])
                    return
            _when.append(when_)
        if data['_splay']:
            _when.append(data['_splay'])
        # Sort the list of "whens" from earlier to later schedules
        _when.sort()
        # Copy the list so we can loop through it
        for i in copy.deepcopy(_when):
            if len(_when) > 1:
                if i < now - loop_interval:
                    # Remove all missed schedules except the latest one.
                    # We need it to detect if it was triggered previously.
                    _when.remove(i)
        if _when:
            # Grab the first element, which is the next run time or
            # last scheduled time in the past.
            when = _when[0]
            if when < now - loop_interval and \
                    not data.get('_run', False) and \
                    not data.get('run', False) and \
                    not data['_splay']:
                data['_next_fire_time'] = None
                data['_continue'] = True
                return
            if '_run' not in data:
                # Prevent run of jobs from the past
                data['_run'] = bool(when >= now - loop_interval)
            if not data['_next_fire_time']:
                data['_next_fire_time'] = when
                data['_next_scheduled_fire_time'] = when
            # NOTE(review): 'run' here is read from eval's enclosing
            # scope (a closure variable), not a parameter -- confirm
            # this is intentional.
            if data['_next_fire_time'] < when and \
                    not run and \
                    not data['_run']:
                data['_next_fire_time'] = when
                data['_run'] = True
        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True
    def _handle_cron(data, loop_interval):
        '''
        Handle schedule item with cron
        '''
        if not _CRON_SUPPORTED:
            data['_error'] = ('Missing python-croniter. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
        if data['_next_fire_time'] is None:
            # Get next time frame for a "cron" job if it has been never
            # executed before or already executed in the past.
            try:
                data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            except (ValueError, KeyError):
                data['_error'] = ('Invalid cron string. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
            # If next job run is scheduled more than 1 minute ahead and
            # configured loop interval is longer than that, we should
            # shorten it to get our job executed closer to the beginning
            # of desired time.
            # NOTE(review): (now - _next_fire_time) is negative for a
            # future fire time, so 'interval >= 60' only holds when the
            # fire time is already in the past -- confirm intended.
            interval = (now - data['_next_fire_time']).total_seconds()
            if interval >= 60 and interval < self.loop_interval:
                self.loop_interval = interval
    def _handle_run_explicit(data, loop_interval):
        '''
        Handle schedule item with run_explicit
        '''
        # Normalize entries to datetimes; non-datetime entries are dicts
        # with 'time' and 'time_fmt' keys.
        _run_explicit = []
        for _run_time in data['run_explicit']:
            if isinstance(_run_time, datetime.datetime):
                _run_explicit.append(_run_time)
            else:
                _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                _run_time['time_fmt']))
        data['run'] = False
        # Copy the list so we can loop through it
        for i in copy.deepcopy(_run_explicit):
            # Drop stale entries, but always keep at least one.
            if len(_run_explicit) > 1:
                if i < now - loop_interval:
                    _run_explicit.remove(i)
        if _run_explicit:
            # Run when 'now' falls inside the current loop window.
            if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                data['run'] = True
                data['_next_fire_time'] = _run_explicit[0]
    def _handle_skip_explicit(data, loop_interval):
        '''
        Handle schedule item with skip_explicit
        '''
        data['run'] = False
        # Normalize entries to datetimes; non-datetime entries are dicts
        # with 'time' and 'time_fmt' keys.
        _skip_explicit = []
        for _skip_time in data['skip_explicit']:
            if isinstance(_skip_time, datetime.datetime):
                _skip_explicit.append(_skip_time)
            else:
                _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                 _skip_time['time_fmt']))
        # Copy the list so we can loop through it
        for i in copy.deepcopy(_skip_explicit):
            if i < now - loop_interval:
                _skip_explicit.remove(i)
        if _skip_explicit:
            if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                # Inside the skip window: either run the configured skip
                # function instead, or record the skip bookkeeping.
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'skip_explicit'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
    def _handle_skip_during_range(data, loop_interval):
        '''
        Handle schedule item with skip_during_range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
        if not isinstance(data['skip_during_range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
        start = data['skip_during_range']['start']
        end = data['skip_during_range']['end']
        # Accept datetimes or parseable date strings for both bounds.
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        # Check to see if we should run the job immediately
        # after the skip_during_range is over
        if 'run_after_skip_range' in data and \
                data['run_after_skip_range']:
            if 'run_explicit' not in data:
                data['run_explicit'] = []
            # Add a run_explicit for immediately after the
            # skip_during_range ends
            _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
            # NOTE(review): run_explicit holds dicts, but the membership
            # test compares against a plain string, so it never matches
            # and duplicates may accumulate -- confirm.
            if _run_immediate not in data['run_explicit']:
                data['run_explicit'].append({'time': _run_immediate,
                                             'time_fmt': '%Y-%m-%dT%H:%M:%S'})
        if end > start:
            if start <= now <= end:
                # Inside the skip window: run the configured skip
                # function instead, or record the skip bookkeeping.
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger than '
                              'start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
    def _handle_range(data):
        '''
        Handle schedule item with range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return
        if not isinstance(data['range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary.'
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
        start = data['range']['start']
        end = data['range']['end']
        # Accept datetimes or parseable date strings for both bounds.
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end.'
                                  ' Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        if end > start:
            if 'invert' in data['range'] and data['range']['invert']:
                # Inverted range: run only OUTSIDE [start, end].
                if now <= start or now >= end:
                    data['run'] = True
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['run'] = False
            else:
                # Normal range: run only INSIDE [start, end].
                if start <= now <= end:
                    data['run'] = True
                else:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'not_in_range'
                        data['run'] = False
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger '
                              'than start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
# ---- main evaluation loop: walk every configured job and decide
# ---- whether it fires during this scheduler tick.
schedule = self._get_schedule()
if not isinstance(schedule, dict):
    raise ValueError('Schedule must be of type dict.')
# Pull the global modifiers out of the schedule structure before
# iterating the individual jobs.
if 'skip_function' in schedule:
    self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
    self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
    self.enabled = schedule['enabled']
if 'splay' in schedule:
    self.splay = schedule['splay']
_hidden = ['enabled',
           'skip_function',
           'skip_during_range',
           'splay']
for job, data in six.iteritems(schedule):
    # Skip anything that is a global setting
    if job in _hidden:
        continue
    # Clear these out between runs
    for item in ['_continue',
                 '_error',
                 '_enabled',
                 '_skipped',
                 '_skip_reason',
                 '_skipped_time']:
        if item in data:
            del data[item]
    run = False
    # FIX: validate the job body *before* indexing into it.  The
    # previous order read data['name'] first, which raised a TypeError
    # for non-dict job values instead of logging the error below.
    if not isinstance(data, dict):
        log.error(
            'Scheduled job "%s" should have a dict value, not %s',
            job, type(data)
        )
        continue
    if 'name' in data:
        job_name = data['name']
    else:
        job_name = data['name'] = job
    # The callable may be declared under any of three keys.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        if _func not in self.functions:
            log.info(
                'Invalid function: %s in scheduled job %s.',
                _func, job_name
            )
    if '_next_fire_time' not in data:
        data['_next_fire_time'] = None
    if '_splay' not in data:
        data['_splay'] = None
    if 'run_on_start' in data and \
            data['run_on_start'] and \
            '_run_on_start' not in data:
        data['_run_on_start'] = True
    if not now:
        now = datetime.datetime.now()
    # Used for quick lookups when detecting invalid option
    # combinations.
    schedule_keys = set(data.keys())
    time_elements = ('seconds', 'minutes', 'hours', 'days')
    scheduling_elements = ('when', 'cron', 'once')
    invalid_sched_combos = [
        set(i) for i in itertools.combinations(scheduling_elements, 2)
    ]
    if any(i <= schedule_keys for i in invalid_sched_combos):
        log.error(
            'Unable to use "%s" options together. Ignoring.',
            '", "'.join(scheduling_elements)
        )
        continue
    invalid_time_combos = []
    for item in scheduling_elements:
        all_items = itertools.chain([item], time_elements)
        invalid_time_combos.append(
            set(itertools.combinations(all_items, 2)))
    # NOTE(review): each element of invalid_time_combos is a set of
    # 2-tuples while schedule_keys holds strings, so this subset test
    # looks like it can never be true -- confirm upstream intent.
    if any(set(x) <= schedule_keys for x in invalid_time_combos):
        log.error(
            'Unable to use "%s" with "%s" options. Ignoring',
            '", "'.join(time_elements),
            '", "'.join(scheduling_elements)
        )
        continue
    if 'run_explicit' in data:
        _handle_run_explicit(data, loop_interval)
        run = data['run']
    # Exactly one scheduling style applies per job; compute the next
    # fire time accordingly.
    if any(item in data for item in time_elements):
        _handle_time_elements(data)
    elif 'once' in data:
        _handle_once(data, loop_interval)
    elif 'when' in data:
        _handle_when(data, loop_interval)
    elif 'cron' in data:
        _handle_cron(data, loop_interval)
    else:
        continue
    # Something told us to continue, so we continue
    if '_continue' in data and data['_continue']:
        continue
    # An error occurred so we bail out
    if '_error' in data and data['_error']:
        continue
    seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
    # If there is no job specific splay available,
    # grab the global which defaults to None.
    if 'splay' not in data:
        data['splay'] = self.splay
    if 'splay' in data and data['splay']:
        # Got "splay" configured, make decision to run a job based on that
        if not data['_splay']:
            # Try to add "splay" time only if next job fire time is
            # still in the future. We should trigger job run
            # immediately otherwise.
            splay = _splay(data['splay'])
            if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                log.debug('schedule.handle_func: Adding splay of '
                          '%s seconds to next run.', splay)
                data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                if 'when' in data:
                    data['_run'] = True
            else:
                run = True
        if data['_splay']:
            # The "splay" configuration has been already processed, just use it
            seconds = (data['_splay'] - now).total_seconds()
            if 'when' in data:
                data['_next_fire_time'] = data['_splay']
    # Decide whether the job fires this tick, per scheduling style.
    if '_seconds' in data:
        if seconds <= 0:
            run = True
    elif 'when' in data and data['_run']:
        if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
            data['_run'] = False
            run = True
    elif 'cron' in data:
        # Reset next scheduled time because it is in the past now,
        # and we should trigger the job run, then wait for the next one.
        if seconds <= 0:
            data['_next_fire_time'] = None
            run = True
    elif 'once' in data:
        if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
            run = True
    elif seconds == 0:
        run = True
    if '_run_on_start' in data and data['_run_on_start']:
        run = True
        data['_run_on_start'] = False
    elif run:
        # The job is a candidate; apply the restriction modifiers,
        # each of which may veto the run or swap in the skip function.
        if 'range' in data:
            _handle_range(data)
            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue
            run = data['run']
            # Override the functiton if passed back
            if 'func' in data:
                func = data['func']
        # If there is no job specific skip_during_range available,
        # grab the global which defaults to None.
        if 'skip_during_range' not in data and self.skip_during_range:
            data['skip_during_range'] = self.skip_during_range
        if 'skip_during_range' in data and data['skip_during_range']:
            _handle_skip_during_range(data, loop_interval)
            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue
            run = data['run']
            # Override the functiton if passed back
            if 'func' in data:
                func = data['func']
        if 'skip_explicit' in data:
            _handle_skip_explicit(data, loop_interval)
            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue
            run = data['run']
            # Override the functiton if passed back
            if 'func' in data:
                func = data['func']
        if 'until' in data:
            _handle_until(data)
            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue
            run = data['run']
        if 'after' in data:
            _handle_after(data)
            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue
            run = data['run']
        # If args is a list and less than the number of functions
        # run is set to False.
        if 'args' in data and isinstance(data['args'], list):
            if len(data['args']) < len(func):
                data['_error'] = ('Number of arguments is less than '
                                  'the number of functions. Ignoring job.')
                log.error(data['_error'])
                run = False
    # If the job item has continue, then we set run to False
    # so the job does not run but we still get the important
    # information calculated, eg. _next_fire_time
    if '_continue' in data and data['_continue']:
        run = False
    # If there is no job specific enabled available,
    # grab the global which defaults to True.
    if 'enabled' not in data:
        data['enabled'] = self.enabled
    # If globally disabled, disable the job
    if not self.enabled:
        data['enabled'] = self.enabled
        data['_skip_reason'] = 'disabled'
        data['_skipped_time'] = now
        data['_skipped'] = True
        run = False
    # Job is disabled, set run to False
    if 'enabled' in data and not data['enabled']:
        data['_enabled'] = False
        data['_skip_reason'] = 'disabled'
        data['_skipped_time'] = now
        data['_skipped'] = True
        run = False
    miss_msg = ''
    if seconds < 0:
        miss_msg = ' (runtime missed ' \
                   'by {0} seconds)'.format(abs(seconds))
    try:
        if run:
            # Job is disabled, continue
            if 'enabled' in data and not data['enabled']:
                log.debug('Job: %s is disabled', job_name)
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                continue
            if 'jid_include' not in data or data['jid_include']:
                data['jid_include'] = True
                log.debug('schedule: Job %s was scheduled with jid_include, '
                          'adding to cache (jid_include defaults to True)',
                          job_name)
                if 'maxrunning' in data:
                    log.debug('schedule: Job %s was scheduled with a max '
                              'number of %s', job_name, data['maxrunning'])
                else:
                    log.info('schedule: maxrunning parameter was not specified for '
                             'job %s, defaulting to 1.', job_name)
                    data['maxrunning'] = 1
            if not self.standalone:
                data['run'] = run
                data = self._check_max_running(func,
                                               data,
                                               self.opts,
                                               now)
                run = data['run']
            # Check run again, just in case _check_max_running
            # set run to False
            if run:
                log.info('Running scheduled job: %s%s', job_name, miss_msg)
                self._run_job(func, data)
    finally:
        # Only set _last_run if the job ran
        if run:
            data['_last_run'] = now
        data['_splay'] = None
    # Interval jobs: compute the next fire time for the following tick.
    if '_seconds' in data:
        if self.standalone:
            data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        elif '_skipped' in data and data['_skipped']:
            data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
        elif run:
            data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch the scheduled job: one process (or thread) per function in
    ``func``, each calling ``self.handle_func``.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return
    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}
    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread
        for i, _func in enumerate(func):
            # Each function gets its own deep copy of the job data so
            # per-function argument slicing does not leak between runs.
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.add_job
|
python
|
def add_job(self, data, persist=True):
    '''
    Adds a new job to the scheduler. The format is the same as required in
    the configuration file. See the docs on how YAML is interpreted into
    python data-structures to make sure, you pass correct dictionaries.

    :param dict data: single-entry dict mapping the new job's name to its
        options dict.
    :param bool persist: write the updated schedule to _schedule.conf.
    :raises ValueError: if ``data`` is not a dict or holds more than one job.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if not len(data) == 1:
        raise ValueError('You can only schedule one new job at a time.')
    # if enabled is not included in the job,
    # assume job is enabled.
    for job in data:
        if 'enabled' not in data[job]:
            data[job]['enabled'] = True
    new_job = next(six.iterkeys(data))
    # Pillar-defined jobs cannot be replaced from here.
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)
    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
|
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L342-L382
|
[
"def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n",
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
    '''
    Create a Schedule object, pass in the opts and the functions dict to use
    '''
    # Cached singleton instance; populated by __new__ unless the caller
    # requests a private instance via new_instance=True.
    instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule.

        Pass ``new_instance=True`` to get a private, non-cached instance
        instead of the shared singleton.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                # Private instance requested: do not overwrite the
                # cached singleton.
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        # Intentionally a no-op: real initialization happens in
        # __singleton_init__, invoked from __new__ only when a new
        # instance is actually created.
        pass
    # an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        Real initializer, called from __new__ exactly once per instance.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        # Global job modifiers; may be overridden from the schedule data.
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept either a mapping-like returners object or a loader
            # whose functions must still be generated.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
    def __getnewargs__(self):
        # Pickle support: positional arguments handed back to __new__
        # when the instance is reconstructed.
        return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
    def _get_schedule(self,
                      include_opts=True,
                      include_pillar=True,
                      remove_hidden=False):
        '''
        Return the schedule data structure.

        :param bool include_opts: merge in jobs from ``opts['schedule']``.
        :param bool include_pillar: merge in jobs from ``pillar['schedule']``.
        :param bool remove_hidden: strip '_'-prefixed bookkeeping keys from
            each job before returning.
        '''
        schedule = {}
        if include_pillar:
            pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
            if not isinstance(pillar_schedule, dict):
                raise ValueError('Schedule must be of type dict.')
            schedule.update(pillar_schedule)
        if include_opts:
            # opts entries win over pillar entries with the same job name
            # (plain dict.update semantics).
            opts_schedule = self.opts.get('schedule', {})
            if not isinstance(opts_schedule, dict):
                raise ValueError('Schedule must be of type dict.')
            schedule.update(opts_schedule)
        if remove_hidden:
            # Iterate a deep copy so deleting from the live dict while
            # looping is safe.
            _schedule = copy.deepcopy(schedule)
            for job in _schedule:
                if isinstance(_schedule[job], dict):
                    for item in _schedule[job]:
                        if item.startswith('_'):
                            del schedule[job][item]
        return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce the job's ``maxrunning`` limit.

        Counts currently running jobs with the same schedule name and, if
        the limit is reached, flips ``data['run']`` to False and records
        skip bookkeeping.  Returns the (possibly modified) job data.
        '''
        # Check to see if there are other jobs with this
        # signature running. If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True
        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
        '''
        # Work out where the include directory (e.g. minion.d) lives.
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR
        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))
        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)
        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        # Never write pillar-sourced jobs or '_'-prefixed bookkeeping keys.
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            # Best effort: log and carry on with the in-memory schedule.
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Delete a job from the scheduler.

    Jobs defined in pillar cannot be deleted here; a warning is logged
    instead.  A completion event carrying the updated schedule is fired
    in every case so callers can observe the result.

    :param str name: Name of the scheduled job to remove.
    :param bool persist: When True (the default), write the updated
        schedule back to disk via ``persist()``.
    '''
    # ensure job exists, then delete it
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)
    else:
        # Previously a silent no-op; surface the problem so callers
        # can tell the job was never present.
        log.warning("Cannot delete job %s, job not found.", name)
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # remove from self.intervals
    if name in self.intervals:
        del self.intervals[name]
    if persist:
        self.persist()
def reset(self):
    '''
    Restore the scheduler to its default state: enabled, with no global
    skip function, skip range, or splay, and an empty schedule.
    '''
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Delete every scheduled job whose name starts with ``name``.
    Matching jobs that live in pillar cannot be removed and only
    produce a warning.

    :param str name: Prefix to match against job names.
    :param bool persist: When True, write the updated schedule to disk.
    '''
    # Drop matching jobs that are defined in opts.
    for job_name in [job for job in self.opts['schedule']
                     if job.startswith(name)]:
        del self.opts['schedule'][job_name]
    # Pillar-defined jobs cannot be touched; just warn about them.
    for job_name in self._get_schedule(include_opts=False):
        if job_name.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job_name)
    # Fire the complete event back along with updated list of schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_delete_complete')
    # Clear interval bookkeeping for the removed jobs.
    for job_name in [job for job in self.intervals
                     if job.startswith(name)]:
        del self.intervals[job_name]
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Turn on a single job in the scheduler.  Pillar-defined jobs cannot
    be modified and only produce a warning.

    :param str name: Name of the job to enable.
    :param bool persist: When True, write the updated schedule to disk.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion together with the updated schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Turn off a single job in the scheduler.  Pillar-defined jobs cannot
    be modified and only produce a warning.

    :param str name: Name of the job to disable.
    :param bool persist: When True, write the updated schedule to disk.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion together with the updated schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the definition of a scheduled job.  Pillar-defined jobs
    cannot be modified and only produce a warning.

    :param str name: Name of the job to (re)define.
    :param dict schedule: The new schedule item definition.
    :param bool persist: When True, write the updated schedule to disk.
    '''
    if name in self.opts['schedule']:
        # Remove the existing definition first; delete_job takes care
        # of its own event firing and interval bookkeeping.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def run_job(self, name):
    '''
    Execute a scheduled job immediately, outside of its normal schedule.

    :param str name: Name of the job to run.
    '''
    data = self._get_schedule().get(name, {})
    # The function may be declared under any of several keys.
    func = None
    for key in ('function', 'func', 'fun'):
        if key in data:
            func = data[key]
            break
    # Normalize to a list so multi-function jobs are handled uniformly.
    funcs = func if isinstance(func, list) else [func]
    for job_func in funcs:
        if job_func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                job_func, name
            )
        data.setdefault('name', name)
        log.info('Running Job: %s', name)
        # Jobs are runnable by default unless 'run' is explicitly False.
        if data.get('run', True):
            self._run_job(job_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and broadcast the updated schedule
    over the event bus.
    '''
    self.opts['schedule']['enabled'] = True
    # Notify listeners that the scheduler state changed.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and broadcast the updated schedule
    over the event bus.
    '''
    self.opts['schedule']['enabled'] = False
    # Notify listeners that the scheduler state changed.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge schedule data loaded from the saved schedule file into opts,
    discarding all interval bookkeeping.

    :param dict schedule: Schedule data, optionally nested under a
        top-level ``schedule`` key.
    '''
    # Forget every tracked interval; it is rebuilt on evaluation.
    self.intervals = {}
    # Accept either a bare schedule or one wrapped in a 'schedule' key.
    if 'schedule' in schedule:
        schedule = schedule['schedule']
    self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Broadcast the current schedule items over the event bus.

    :param str where: ``'pillar'`` to list only pillar-defined jobs,
        ``'opts'`` for only opts-defined jobs, anything else for both.
    '''
    # Map the selector onto the matching _get_schedule() filter.
    selectors = {
        'pillar': {'include_opts': False},
        'opts': {'include_pillar': False},
    }
    schedule = self._get_schedule(**selectors.get(where, {}))
    # Fire the complete event back along with the list of schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event({'complete': True, 'schedule': schedule},
                         tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk and announce completion over
    the event bus.
    '''
    self.persist()
    # Fire the complete event so callers know the save finished.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event({'complete': True},
                         tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a single run of a job: record a skip at ``data['time']``
    and a replacement run at ``data['new_time']``.  Pillar-defined jobs
    cannot be modified and only produce a warning.

    :param str name: Name of the job to postpone.
    :param dict data: Must contain ``time`` and ``new_time``; may
        contain ``time_fmt`` (defaults to ``%Y-%m-%dT%H:%M:%S``).
    '''
    skip_time = data['time']
    run_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Record the occurrence to skip ...
        job.setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
        # ... and the explicit replacement run time.
        job.setdefault('run_explicit', []).append(
            {'time': run_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Fire the complete event back along with updated list of schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip the run of a job at one specific time.  Pillar-defined jobs
    cannot be modified and only produce a warning.

    :param str name: Name of the job to skip.
    :param dict data: Must contain ``time``; may contain ``time_fmt``
        (defaults to ``%Y-%m-%dT%H:%M:%S``).
    '''
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Record the occurrence to skip.
        job.setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Fire the complete event back along with updated list of schedule
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Broadcast the next fire time of the named job over the event bus.

    :param str name: Name of the job to look up.
    :param str fmt: strftime format used to render the timestamp.
    '''
    schedule = self._get_schedule()
    next_fire = None
    if schedule:
        next_fire = schedule.get(name, {}).get('_next_fire_time', None)
    if next_fire:
        next_fire = next_fire.strftime(fmt)
    # Fire the completion event carrying the (possibly None) timestamp.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts,
                                           listen=False)
    event_bus.fire_event(
        {'complete': True, 'next_fire_time': next_fire},
        tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the schedule entry for ``name``, or an empty dict when no
    such job exists.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled function in a spawned process or thread.

    Writes a job-cache proc file while running (unless standalone),
    invokes the function with the job's args/kwargs, dispatches the
    result to any configured returners, fires a ``__schedule_return``
    event so the job is recorded, and finally removes the proc file.

    Fix: the kwargs-packing loop compared strings with ``is not``
    (identity), which is implementation-defined for string literals and
    a SyntaxWarning on modern Python; it now uses ``!=``.

    :param bool multiprocessing_enabled: True when running in a spawned
        process; the process is daemonized and exits when finished.
    :param str func: Name of the function in ``self.functions`` to run.
    :param dict data: The schedule item definition for this job.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # Use equality, not identity ('is not'), for the string
                # comparison -- identity on literals is undefined behavior.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch a scheduled job, honoring ``dry_run``, the
    ``run_schedule_jobs_in_background`` option and the ``multiprocessing``
    setting. ``func`` may be a list of function names; each entry runs in
    its own process/thread with a deep copy of ``data``.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        for i, _func in enumerate(func):
            # Each function gets its own copy of the job data; when 'args'
            # is a list it is indexed positionally per function.
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.modify_job
|
python
|
def modify_job(self, name, schedule, persist=True):
    '''
    Replace an existing job definition in the scheduler.

    Jobs defined in the pillar cannot be modified; a warning is logged
    and the call is a no-op in that case.
    '''
    # Drop the current opts-based definition (if any) before replacing it.
    if name in self.opts['schedule']:
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        # Pillar-provided jobs are read-only from here.
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
|
Modify a job in the scheduler. Ignores jobs from pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L424-L438
|
[
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n",
"def persist(self):\n '''\n Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf\n '''\n config_dir = self.opts.get('conf_dir', None)\n if config_dir is None and 'conf_file' in self.opts:\n config_dir = os.path.dirname(self.opts['conf_file'])\n if config_dir is None:\n config_dir = salt.syspaths.CONFIG_DIR\n\n minion_d_dir = os.path.join(\n config_dir,\n os.path.dirname(self.opts.get('default_include',\n salt.config.DEFAULT_MINION_OPTS['default_include'])))\n\n if not os.path.isdir(minion_d_dir):\n os.makedirs(minion_d_dir)\n\n schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')\n log.debug('Persisting schedule')\n schedule_data = self._get_schedule(include_pillar=False,\n remove_hidden=True)\n try:\n with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:\n fp_.write(\n salt.utils.stringutils.to_bytes(\n salt.utils.yaml.safe_dump(\n {'schedule': schedule_data}\n )\n )\n )\n except (IOError, OSError):\n log.error('Failed to persist the updated schedule',\n exc_info_on_loglevel=logging.DEBUG)\n",
"def delete_job(self, name, persist=True):\n '''\n Deletes a job from the scheduler. Ignore jobs from pillar\n '''\n # ensure job exists, then delete it\n if name in self.opts['schedule']:\n del self.opts['schedule'][name]\n elif name in self._get_schedule(include_opts=False):\n log.warning(\"Cannot delete job %s, it's in the pillar!\", name)\n\n # Fire the complete event back along with updated list of schedule\n evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)\n evt.fire_event({'complete': True,\n 'schedule': self._get_schedule()},\n tag='/salt/minion/minion_schedule_delete_complete')\n\n # remove from self.intervals\n if name in self.intervals:\n del self.intervals[name]\n\n if persist:\n self.persist()\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.

    The first call builds and caches the singleton; later calls return
    it unchanged. Passing ``new_instance=True`` builds a private,
    non-cached instance instead.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # Real initialization lives in __singleton_init__ because plain
        # __init__ would re-run on every Schedule(...) call.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Caller asked for a private instance; do not touch the
            # class-level singleton.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally a no-op: __init__ runs on every Schedule(...) call,
    # even when __new__ returned the cached singleton. All real setup
    # happens in __singleton_init__.
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    Real initializer, invoked from __new__ only when a new instance is
    actually created (the singleton pattern keeps __init__ empty).
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Schedule-wide behavior flags; individual jobs may override them.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either a mapping of returners or a loader object that
        # can generate one.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments handed back to __new__ when the instance is re-created
    # (e.g. on unpickling); the trailing None is the 'cleanup' argument.
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts currently-running jobs with the same schedule name and, when
    the limit is reached, marks the job as skipped by setting
    ``data['run']`` to False. Returns the (possibly updated) job data.
    '''
    # Check to see if there are other jobs with this
    # signature running. If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Only count entries whose recorded process is still alive.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
    '''
    # Locate the config directory: explicit conf_dir, then the directory
    # of conf_file, then the system default, in that order.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    # Only opts-based jobs are written out; pillar jobs and the private
    # underscore-prefixed bookkeeping keys are excluded.
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        # Best effort: a failed write is logged, not raised.
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove a job from the scheduler. Jobs defined in the pillar are
    left untouched (a warning is logged instead).
    '''
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Notify listeners that the deletion finished, including the
    # resulting schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # Forget any interval bookkeeping for the job.
    self.intervals.pop(name, None)

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every job whose name starts with ``name``. Matching jobs
    that come from the pillar are reported but not removed.
    '''
    for job in [j for j in self.opts['schedule'] if j.startswith(name)]:
        del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Fire the completion event with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # Drop matching interval bookkeeping as well.
    for job in [j for j in self.intervals if j.startswith(name)]:
        del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
# if enabled is not included in the job,
# assume job is enabled.
for job in data:
if 'enabled' not in data[job]:
data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
log.warning("Cannot update job %s, it's in the pillar!", new_job)
elif new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled job: %s', new_job)
self.opts['schedule'].update(data)
else:
log.info('Added new job %s to scheduler', new_job)
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True):
    '''
    Flip a job's ``enabled`` flag to True. Jobs defined in the pillar
    cannot be modified; a warning is logged instead.
    '''
    if name in self.opts['schedule']:
        log.info('Enabling job %s in scheduler', name)
        self.opts['schedule'][name]['enabled'] = True
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the completion event with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Flip a job's ``enabled`` flag to False. Jobs defined in the pillar
    cannot be modified; a warning is logged instead.
    '''
    if name in self.opts['schedule']:
        log.info('Disabling job %s in scheduler', name)
        self.opts['schedule'][name]['enabled'] = False
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the completion event with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a scheduled job immediately, outside of its normal schedule.
    '''
    data = self._get_schedule().get(name, {})

    # The function may be stored under any of these three keys.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            # Log but keep going; the unknown name is rejected again
            # (and raised) inside handle_func.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and broadcast the new state.
    '''
    self.opts['schedule']['enabled'] = True

    # Announce completion together with the current schedule.
    payload = {'complete': True, 'schedule': self._get_schedule()}
    salt.utils.event.get_event(
        'minion', opts=self.opts, listen=False
    ).fire_event(payload, tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and broadcast the new state.
    '''
    self.opts['schedule']['enabled'] = False

    # Announce completion together with the current schedule.
    payload = {'complete': True, 'schedule': self._get_schedule()}
    salt.utils.event.get_event(
        'minion', opts=self.opts, listen=False
    ).fire_event(payload, tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Fire an event carrying the current schedule.

    ``where`` selects the source: 'pillar' for pillar-only jobs,
    'opts' for opts-only jobs, anything else for the merged view.
    '''
    selector = {'pillar': {'include_opts': False},
                'opts': {'include_pillar': False}}
    schedule = self._get_schedule(**selector.get(where, {}))

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Write the current schedule to disk and fire a 'saved' event.
    '''
    self.persist()

    # Announce that the save completed.
    salt.utils.event.get_event(
        'minion', opts=self.opts, listen=False
    ).fire_event({'complete': True},
                 tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a single run of a job: skip it at ``data['time']`` and run
    it at ``data['new_time']`` instead. Jobs defined in the pillar
    cannot be modified.
    '''
    skip_time = data['time']
    run_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
        job.setdefault('run_explicit', []).append(
            {'time': run_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the completion event with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Register an explicit skip for job ``name`` at ``data['time']``.
    Jobs defined in the pillar cannot be modified.
    '''
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': skip_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the completion event with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Fire an event carrying the next fire time of job ``name``,
    formatted with ``fmt`` (None when unknown).
    '''
    schedule = self._get_schedule()
    next_fire = schedule.get(name, {}).get('_next_fire_time', None) if schedule else None
    formatted = next_fire.strftime(fmt) if next_fire else next_fire

    # Fire the completion event with the computed time.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': formatted},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Run a single scheduled function and report the result.

    Executes ``func`` (a name resolvable through ``self.functions``)
    with the args/kwargs from ``data``, records the job in the proc
    dir, hands the result to any configured returners, and fires a
    ``__schedule_return`` event so the job is recorded by the master.
    This method is the target of the process/thread spawned by
    _run_job; in the multiprocessing case it exits the process when
    done.

    Fixes applied: the pub-data packing loop compared ``key`` to the
    string literal ``'kwargs'`` with ``is not`` (an identity check that
    only worked thanks to CPython string interning and raises a
    SyntaxWarning on modern interpreters) -- replaced with ``!=``.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # FIX: was `key is not 'kwargs'` -- an identity comparison
                # with a str literal; use equality instead.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    # Unused `as exc` binding removed; the traceback is
                    # captured by log.exception.
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)

            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno in (errno.EEXIST, errno.ENOENT):
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    def eval(self, now=None):
        '''
        Evaluate and execute the schedule.

        Walks every job in the merged schedule, computes whether it is due
        (seconds/when/once/cron plus splay/range/skip modifiers) and fires
        due jobs via ``_run_job``.

        :param datetime now: Override current time with a datetime object
            instance
        '''
        log.trace('==== evaluating schedule now %s =====', now)

        # Normalize the configured loop interval to a timedelta so it can be
        # compared/added to datetimes below.
        loop_interval = self.opts['loop_interval']
        if not isinstance(loop_interval, datetime.timedelta):
            loop_interval = datetime.timedelta(seconds=loop_interval)

        def _splay(splaytime):
            '''
            Calculate splaytime: a random offset in seconds, either within
            a {start, end} dict range or in [1, splaytime].
            '''
            splay_ = None
            if isinstance(splaytime, dict):
                if splaytime['end'] >= splaytime['start']:
                    splay_ = random.randint(splaytime['start'],
                                            splaytime['end'])
                else:
                    log.error('schedule.handle_func: Invalid Splay, '
                              'end must be larger than start. Ignoring splay.')
            else:
                splay_ = random.randint(1, splaytime)
            return splay_

        def _handle_time_elements(data):
            '''
            Handle schedule item with time elements
            seconds, minutes, hours, days
            '''
            if '_seconds' not in data:
                # Fold all time elements into a single interval in seconds.
                interval = int(data.get('seconds', 0))
                interval += int(data.get('minutes', 0)) * 60
                interval += int(data.get('hours', 0)) * 3600
                interval += int(data.get('days', 0)) * 86400

                data['_seconds'] = interval

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

                # Shrink the scheduler's polling interval so this job is
                # evaluated often enough to fire on time.
                if interval < self.loop_interval:
                    self.loop_interval = interval

            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

        def _handle_once(data, loop_interval):
            '''
            Handle schedule item with once
            '''
            if data['_next_fire_time']:
                if data['_next_fire_time'] < now - loop_interval or \
                        data['_next_fire_time'] > now and \
                        not data['_splay']:
                    data['_continue'] = True

            if not data['_next_fire_time'] and \
                    not data['_splay']:
                once = data['once']
                if not isinstance(once, datetime.datetime):
                    once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                    try:
                        once = datetime.datetime.strptime(data['once'],
                                                          once_fmt)
                    except (TypeError, ValueError):
                        data['_error'] = ('Date string could not '
                                          'be parsed: {0}, {1}. '
                                          'Ignoring job {2}.'.format(
                                              data['once'],
                                              once_fmt,
                                              data['name']))
                        log.error(data['_error'])
                        return
                data['_next_fire_time'] = once
                data['_next_scheduled_fire_time'] = once
                # If _next_fire_time is less than now, continue
                if once < now - loop_interval:
                    data['_continue'] = True

        def _handle_when(data, loop_interval):
            '''
            Handle schedule item with when
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['when'], list):
                _when_data = [data['when']]
            else:
                _when_data = data['when']

            _when = []
            for i in _when_data:
                # A "when" entry may be a named alias resolved through the
                # pillar or grains "whens" mapping, or a literal date string.
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        i in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'],
                                      dict):
                        data['_error'] = ('Pillar item "whens" '
                                          'must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['pillar']['whens'][i]
                elif ('whens' in self.opts['grains'] and
                      i in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'],
                                      dict):
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['grains']['whens'][i]
                else:
                    when_ = i

                if not isinstance(when_, datetime.datetime):
                    try:
                        when_ = dateutil_parser.parse(when_)
                    except ValueError:
                        data['_error'] = ('Invalid date string {0}. '
                                          'Ignoring job {1}.'.format(i, data['name']))
                        log.error(data['_error'])
                        return

                _when.append(when_)

            if data['_splay']:
                _when.append(data['_splay'])

            # Sort the list of "whens" from earlier to later schedules
            _when.sort()

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_when):
                if len(_when) > 1:
                    if i < now - loop_interval:
                        # Remove all missed schedules except the latest one.
                        # We need it to detect if it was triggered previously.
                        _when.remove(i)

            if _when:
                # Grab the first element, which is the next run time or
                # last scheduled time in the past.
                when = _when[0]

                if when < now - loop_interval and \
                        not data.get('_run', False) and \
                        not data.get('run', False) and \
                        not data['_splay']:
                    data['_next_fire_time'] = None
                    data['_continue'] = True
                    return

                if '_run' not in data:
                    # Prevent run of jobs from the past
                    data['_run'] = bool(when >= now - loop_interval)

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = when

                data['_next_scheduled_fire_time'] = when

                # NOTE: ``run`` here is the per-job flag from the enclosing
                # eval() loop (closure), not a local of this helper.
                if data['_next_fire_time'] < when and \
                        not run and \
                        not data['_run']:
                    data['_next_fire_time'] = when
                    data['_run'] = True

            elif not data.get('_run', False):
                data['_next_fire_time'] = None
                data['_continue'] = True

        def _handle_cron(data, loop_interval):
            '''
            Handle schedule item with cron
            '''
            if not _CRON_SUPPORTED:
                data['_error'] = ('Missing python-croniter. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if data['_next_fire_time'] is None:
                # Get next time frame for a "cron" job if it has been never
                # executed before or already executed in the past.
                try:
                    data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                    data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                except (ValueError, KeyError):
                    data['_error'] = ('Invalid cron string. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

                # If next job run is scheduled more than 1 minute ahead and
                # configured loop interval is longer than that, we should
                # shorten it to get our job executed closer to the beginning
                # of desired time.
                # NOTE(review): ``now - next_fire_time`` is negative for a
                # future fire time, so this condition never triggers as
                # written -- possibly should be ``next_fire_time - now``;
                # confirm against upstream before changing.
                interval = (now - data['_next_fire_time']).total_seconds()
                if interval >= 60 and interval < self.loop_interval:
                    self.loop_interval = interval

        def _handle_run_explicit(data, loop_interval):
            '''
            Handle schedule item with run_explicit
            '''
            _run_explicit = []
            for _run_time in data['run_explicit']:
                if isinstance(_run_time, datetime.datetime):
                    _run_explicit.append(_run_time)
                else:
                    _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                    _run_time['time_fmt']))

            data['run'] = False

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_run_explicit):
                if len(_run_explicit) > 1:
                    if i < now - loop_interval:
                        _run_explicit.remove(i)

            if _run_explicit:
                if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                    data['run'] = True
                    data['_next_fire_time'] = _run_explicit[0]

        def _handle_skip_explicit(data, loop_interval):
            '''
            Handle schedule item with skip_explicit
            '''
            data['run'] = False

            _skip_explicit = []
            for _skip_time in data['skip_explicit']:
                if isinstance(_skip_time, datetime.datetime):
                    _skip_explicit.append(_skip_time)
                else:
                    _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                     _skip_time['time_fmt']))

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_skip_explicit):
                if i < now - loop_interval:
                    _skip_explicit.remove(i)

            if _skip_explicit:
                if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                    # Inside the skip window: either substitute the global
                    # skip_function, or record why the job was skipped.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'skip_explicit'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
            else:
                data['run'] = True

        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle schedule item with skip_during_range
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
                    data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])

        def _handle_range(data):
            '''
            Handle schedule item with range
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only OUTSIDE [start, end].
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        if self.skip_function:
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])

        def _handle_after(data):
            '''
            Handle schedule item with after
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            after = data['after']
            if not isinstance(after, datetime.datetime):
                after = dateutil_parser.parse(after)

            if after >= now:
                log.debug(
                    'After time has not passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'after_not_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True

        def _handle_until(data):
            '''
            Handle schedule item with until
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            until = data['until']
            if not isinstance(until, datetime.datetime):
                until = dateutil_parser.parse(until)

            if until <= now:
                log.debug(
                    'Until time has passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'until_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True

        def _chop_ms(dt):
            '''
            Remove the microseconds from a datetime object
            '''
            return dt - datetime.timedelta(microseconds=dt.microsecond)

        schedule = self._get_schedule()
        if not isinstance(schedule, dict):
            raise ValueError('Schedule must be of type dict.')
        # Pull the schedule-level (global) settings onto the instance.
        if 'skip_function' in schedule:
            self.skip_function = schedule['skip_function']
        if 'skip_during_range' in schedule:
            self.skip_during_range = schedule['skip_during_range']
        if 'enabled' in schedule:
            self.enabled = schedule['enabled']
        if 'splay' in schedule:
            self.splay = schedule['splay']

        _hidden = ['enabled',
                   'skip_function',
                   'skip_during_range',
                   'splay']
        for job, data in six.iteritems(schedule):
            # Skip anything that is a global setting
            if job in _hidden:
                continue

            # Clear these out between runs
            for item in ['_continue',
                         '_error',
                         '_enabled',
                         '_skipped',
                         '_skip_reason',
                         '_skipped_time']:
                if item in data:
                    del data[item]
            run = False

            # NOTE(review): data['name'] is read here before the
            # isinstance(data, dict) check below, so a non-dict job value
            # raises TypeError instead of hitting the log.error branch --
            # confirm intended ordering.
            if 'name' in data:
                job_name = data['name']
            else:
                job_name = data['name'] = job

            if not isinstance(data, dict):
                log.error(
                    'Scheduled job "%s" should have a dict value, not %s',
                    job_name, type(data)
                )
                continue
            # Accept any of the three aliases for the function to run.
            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if not isinstance(func, list):
                func = [func]
            for _func in func:
                if _func not in self.functions:
                    log.info(
                        'Invalid function: %s in scheduled job %s.',
                        _func, job_name
                    )

            if '_next_fire_time' not in data:
                data['_next_fire_time'] = None

            if '_splay' not in data:
                data['_splay'] = None

            if 'run_on_start' in data and \
                    data['run_on_start'] and \
                    '_run_on_start' not in data:
                data['_run_on_start'] = True

            if not now:
                now = datetime.datetime.now()

            # Used for quick lookups when detecting invalid option
            # combinations.
            schedule_keys = set(data.keys())

            time_elements = ('seconds', 'minutes', 'hours', 'days')
            scheduling_elements = ('when', 'cron', 'once')

            invalid_sched_combos = [
                set(i) for i in itertools.combinations(scheduling_elements, 2)
            ]

            if any(i <= schedule_keys for i in invalid_sched_combos):
                log.error(
                    'Unable to use "%s" options together. Ignoring.',
                    '", "'.join(scheduling_elements)
                )
                continue

            invalid_time_combos = []
            for item in scheduling_elements:
                all_items = itertools.chain([item], time_elements)
                invalid_time_combos.append(
                    set(itertools.combinations(all_items, 2)))

            if any(set(x) <= schedule_keys for x in invalid_time_combos):
                log.error(
                    'Unable to use "%s" with "%s" options. Ignoring',
                    '", "'.join(time_elements),
                    '", "'.join(scheduling_elements)
                )
                continue

            if 'run_explicit' in data:
                _handle_run_explicit(data, loop_interval)
                run = data['run']

            # Dispatch on the scheduling style; jobs with none of these
            # keys are not runnable and are skipped entirely.
            if True in [True for item in time_elements if item in data]:
                _handle_time_elements(data)
            elif 'once' in data:
                _handle_once(data, loop_interval)
            elif 'when' in data:
                _handle_when(data, loop_interval)
            elif 'cron' in data:
                _handle_cron(data, loop_interval)
            else:
                continue

            # Something told us to continue, so we continue
            if '_continue' in data and data['_continue']:
                continue

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            # Whole seconds until (negative: since) the next fire time.
            seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

            # If there is no job specific splay available,
            # grab the global which defaults to None.
            if 'splay' not in data:
                data['splay'] = self.splay

            if 'splay' in data and data['splay']:
                # Got "splay" configured, make decision to run a job based on that
                if not data['_splay']:
                    # Try to add "splay" time only if next job fire time is
                    # still in the future. We should trigger job run
                    # immediately otherwise.
                    splay = _splay(data['splay'])
                    if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                        log.debug('schedule.handle_func: Adding splay of '
                                  '%s seconds to next run.', splay)
                        data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                        if 'when' in data:
                            data['_run'] = True
                    else:
                        run = True

                if data['_splay']:
                    # The "splay" configuration has been already processed, just use it
                    seconds = (data['_splay'] - now).total_seconds()
                    if 'when' in data:
                        data['_next_fire_time'] = data['_splay']

            if '_seconds' in data:
                if seconds <= 0:
                    run = True
            elif 'when' in data and data['_run']:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    data['_run'] = False
                    run = True
            elif 'cron' in data:
                # Reset next scheduled time because it is in the past now,
                # and we should trigger the job run, then wait for the next one.
                if seconds <= 0:
                    data['_next_fire_time'] = None
                    run = True
            elif 'once' in data:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    run = True
            elif seconds == 0:
                run = True

            if '_run_on_start' in data and data['_run_on_start']:
                run = True
                data['_run_on_start'] = False
            elif run:
                if 'range' in data:
                    _handle_range(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the function if passed back
                    if 'func' in data:
                        func = data['func']

                # If there is no job specific skip_during_range available,
                # grab the global which defaults to None.
                if 'skip_during_range' not in data and self.skip_during_range:
                    data['skip_during_range'] = self.skip_during_range

                if 'skip_during_range' in data and data['skip_during_range']:
                    _handle_skip_during_range(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the function if passed back
                    if 'func' in data:
                        func = data['func']

                if 'skip_explicit' in data:
                    _handle_skip_explicit(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the function if passed back
                    if 'func' in data:
                        func = data['func']

                if 'until' in data:
                    _handle_until(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                if 'after' in data:
                    _handle_after(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

            # If args is a list and less than the number of functions
            # run is set to False.
            if 'args' in data and isinstance(data['args'], list):
                if len(data['args']) < len(func):
                    data['_error'] = ('Number of arguments is less than '
                                      'the number of functions. Ignoring job.')
                    log.error(data['_error'])
                    run = False

            # If the job item has continue, then we set run to False
            # so the job does not run but we still get the important
            # information calculated, eg. _next_fire_time
            if '_continue' in data and data['_continue']:
                run = False

            # If there is no job specific enabled available,
            # grab the global which defaults to True.
            if 'enabled' not in data:
                data['enabled'] = self.enabled

            # If globally disabled, disable the job
            if not self.enabled:
                data['enabled'] = self.enabled
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            # Job is disabled, set run to False
            if 'enabled' in data and not data['enabled']:
                data['_enabled'] = False
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            miss_msg = ''
            if seconds < 0:
                miss_msg = ' (runtime missed ' \
                           'by {0} seconds)'.format(abs(seconds))

            try:
                if run:
                    # Job is disabled, continue
                    if 'enabled' in data and not data['enabled']:
                        log.debug('Job: %s is disabled', job_name)
                        data['_skip_reason'] = 'disabled'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        continue

                    if 'jid_include' not in data or data['jid_include']:
                        data['jid_include'] = True
                        log.debug('schedule: Job %s was scheduled with jid_include, '
                                  'adding to cache (jid_include defaults to True)',
                                  job_name)
                        if 'maxrunning' in data:
                            log.debug('schedule: Job %s was scheduled with a max '
                                      'number of %s', job_name, data['maxrunning'])
                        else:
                            log.info('schedule: maxrunning parameter was not specified for '
                                     'job %s, defaulting to 1.', job_name)
                            data['maxrunning'] = 1

                    if not self.standalone:
                        data['run'] = run
                        data = self._check_max_running(func,
                                                       data,
                                                       self.opts,
                                                       now)
                        run = data['run']

                    # Check run again, just in case _check_max_running
                    # set run to False
                    if run:
                        log.info('Running scheduled job: %s%s', job_name, miss_msg)
                        self._run_job(func, data)
            finally:
                # Only set _last_run if the job ran
                if run:
                    data['_last_run'] = now
                data['_splay'] = None

            if '_seconds' in data:
                if self.standalone:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif '_skipped' in data and data['_skipped']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif run:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
    def _run_job(self, func, data):
        '''
        Fire a scheduled job, one process/thread per function entry.

        :param func: list of function names to execute.
        :param dict data: the job's schedule data (deep-copied per function).
        '''
        # Honor a configured dry run: log and do nothing.
        job_dry_run = data.get('dry_run', False)
        if job_dry_run:
            log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
            return

        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

        if run_schedule_jobs_in_background is False:
            # Explicitly pass False for multiprocessing_enabled
            self.handle_func(False, func, data)
            return

        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Temporarily stash our function references.
            # You can't pickle function references, and pickling is
            # required when spawning new processes on Windows.
            functions = self.functions
            self.functions = {}
            returners = self.returners
            self.returners = {}
            utils = self.utils
            self.utils = {}

        try:
            if multiprocessing_enabled:
                thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
            else:
                thread_cls = threading.Thread

            for i, _func in enumerate(func):
                _data = copy.deepcopy(data)
                # A list-valued 'args' is positional: each function gets
                # the argument entry at its own index.
                if 'args' in _data and isinstance(_data['args'], list):
                    _data['args'] = _data['args'][i]

                if multiprocessing_enabled:
                    with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                        proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                        # Reset current signals before starting the process in
                        # order not to inherit the current signal handlers
                        proc.start()
                    proc.join()
                else:
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    proc.start()
        finally:
            if multiprocessing_enabled and salt.utils.platform.is_windows():
                # Restore our function references.
                self.functions = functions
                self.returners = returners
                self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.run_job
|
python
|
def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data)
|
Run a schedule job now
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L440-L470
|
[
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n",
"def _run_job(self, func, data):\n job_dry_run = data.get('dry_run', False)\n if job_dry_run:\n log.debug('Job %s has \\'dry_run\\' set to True. Not running it.', data['name'])\n return\n\n multiprocessing_enabled = self.opts.get('multiprocessing', True)\n run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)\n\n if run_schedule_jobs_in_background is False:\n # Explicitly pass False for multiprocessing_enabled\n self.handle_func(False, func, data)\n return\n\n if multiprocessing_enabled and salt.utils.platform.is_windows():\n # Temporarily stash our function references.\n # You can't pickle function references, and pickling is\n # required when spawning new processes on Windows.\n functions = self.functions\n self.functions = {}\n returners = self.returners\n self.returners = {}\n utils = self.utils\n self.utils = {}\n\n try:\n if multiprocessing_enabled:\n thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess\n else:\n thread_cls = threading.Thread\n\n for i, _func in enumerate(func):\n _data = copy.deepcopy(data)\n if 'args' in _data and isinstance(_data['args'], list):\n _data['args'] = _data['args'][i]\n\n if multiprocessing_enabled:\n with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):\n proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))\n # Reset current signals before starting the process in\n # order not to inherit the current signal handlers\n proc.start()\n proc.join()\n else:\n proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))\n proc.start()\n finally:\n if multiprocessing_enabled and salt.utils.platform.is_windows():\n # Restore our function references.\n self.functions = functions\n self.returners = returners\n self.utils = utils\n"
] |
class Schedule(object):
    '''
    Create a Schedule object, pass in the opts and the functions dict to use
    '''
    # Class-level cached singleton; populated by __new__ unless the caller
    # passes new_instance=True.
    instance = None
    def __new__(cls, opts, functions,
                returners=None,
                intervals=None,
                cleanup=None,
                proxy=None,
                standalone=False,
                new_instance=False,
                utils=None):
        '''
        Only create one instance of Schedule.

        Passing ``new_instance=True`` builds a fresh, uncached object
        instead of reusing (or replacing) the class-level singleton.
        '''
        if cls.instance is None or new_instance is True:
            log.debug('Initializing new Schedule')
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            instance = object.__new__(cls)
            # Real initialization happens here, not in __init__ (which must
            # stay empty so repeated instantiations don't re-init the singleton).
            instance.__singleton_init__(opts, functions,
                                        returners=returners,
                                        intervals=intervals,
                                        cleanup=cleanup,
                                        proxy=proxy,
                                        standalone=standalone,
                                        utils=utils)
            if new_instance is True:
                return instance
            cls.instance = instance
        else:
            log.debug('Re-using Schedule')
        return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, functions,
                 returners=None,
                 intervals=None,
                 cleanup=None,
                 proxy=None,
                 standalone=False,
                 new_instance=False,
                 utils=None):
        # Intentionally empty: __init__ runs on *every* instantiation of the
        # singleton, so all real setup lives in __singleton_init__ (called
        # exactly once from __new__).
        pass
# an init for the singleton instance to call
    def __singleton_init__(self, opts,
                           functions,
                           returners=None,
                           intervals=None,
                           cleanup=None,
                           proxy=None,
                           standalone=False,
                           utils=None):
        '''
        One-time initializer for the Schedule singleton, invoked from
        ``__new__``.  Sets up loader dicts, per-job interval bookkeeping
        and the global enable/skip/splay state, then optionally cleans
        the proc dir and removes jobs matching the ``cleanup`` prefixes.
        '''
        self.opts = opts
        self.proxy = proxy
        self.functions = functions
        self.utils = utils or salt.loader.utils(opts)
        self.standalone = standalone
        # Global modifiers, overridable per-eval from the schedule itself.
        self.skip_function = None
        self.skip_during_range = None
        self.splay = None
        self.enabled = True
        if isinstance(intervals, dict):
            self.intervals = intervals
        else:
            self.intervals = {}
        if not self.standalone:
            # Accept either a ready mapping of returners or a lazy loader.
            if hasattr(returners, '__getitem__'):
                self.returners = returners
            else:
                self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
        # Keep track of the lowest loop interval needed in this variable
        self.loop_interval = six.MAXSIZE
        if not self.standalone:
            clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
def __getnewargs__(self):
return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
    def _check_max_running(self, func, data, opts, now):
        '''
        Enforce the job's ``maxrunning`` limit.

        Counts live instances of this scheduled job and, when the count
        reaches ``data['maxrunning']``, sets ``data['run']`` to False with
        skip bookkeeping.  Returns the (possibly updated) ``data``.
        '''
        # Check to see if there are other jobs with this
        # signature running.  If there are more than maxrunning
        # jobs present then don't start another.
        # If jid_include is False for this job we can ignore all this
        # NOTE--jid_include defaults to True, thus if it is missing from the data
        # dict we treat it like it was there and is True

        # Check if we're able to run
        if not data['run']:
            return data
        if 'jid_include' not in data or data['jid_include']:
            jobcount = 0
            if self.opts['__role'] == 'master':
                current_jobs = salt.utils.master.get_running_jobs(self.opts)
            else:
                current_jobs = salt.utils.minion.running(self.opts)
            for job in current_jobs:
                if 'schedule' in job:
                    log.debug(
                        'schedule.handle_func: Checking job against fun '
                        '%s: %s', func, job
                    )
                    # Only count entries for *this* job whose pid is alive.
                    if data['name'] == job['schedule'] \
                            and salt.utils.process.os_is_running(job['pid']):
                        jobcount += 1
                        log.debug(
                            'schedule.handle_func: Incrementing jobcount, '
                            'now %s, maxrunning is %s',
                            jobcount, data['maxrunning']
                        )
                        if jobcount >= data['maxrunning']:
                            log.debug(
                                'schedule.handle_func: The scheduled job '
                                '%s was not started, %s already running',
                                data['name'], data['maxrunning']
                            )
                            data['_skip_reason'] = 'maxrunning'
                            data['_skipped'] = True
                            data['_skipped_time'] = now
                            data['run'] = False
                            return data
        return data
    def persist(self):
        '''
        Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
        '''
        # Resolve the config dir: explicit conf_dir, then conf_file's
        # directory, then the packaged default.
        config_dir = self.opts.get('conf_dir', None)
        if config_dir is None and 'conf_file' in self.opts:
            config_dir = os.path.dirname(self.opts['conf_file'])
        if config_dir is None:
            config_dir = salt.syspaths.CONFIG_DIR

        minion_d_dir = os.path.join(
            config_dir,
            os.path.dirname(self.opts.get('default_include',
                                          salt.config.DEFAULT_MINION_OPTS['default_include'])))

        if not os.path.isdir(minion_d_dir):
            os.makedirs(minion_d_dir)

        schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
        log.debug('Persisting schedule')
        # Only opts-backed jobs are written (pillar jobs are not ours to
        # persist) and internal '_' bookkeeping keys are stripped.
        schedule_data = self._get_schedule(include_pillar=False,
                                           remove_hidden=True)
        try:
            with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
                fp_.write(
                    salt.utils.stringutils.to_bytes(
                        salt.utils.yaml.safe_dump(
                            {'schedule': schedule_data}
                        )
                    )
                )
        except (IOError, OSError):
            # Best effort: keep running with the in-memory schedule.
            log.error('Failed to persist the updated schedule',
                      exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignore jobs from pillar
'''
# ensure job exists, then delete it
if name in self.opts['schedule']:
del self.opts['schedule'][name]
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot delete job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
if name in self.intervals:
del self.intervals[name]
if persist:
self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then delete it
for job in list(self.opts['schedule'].keys()):
if job.startswith(name):
del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
if job.startswith(name):
log.warning("Cannot delete job %s, it's in the pillar!", job)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
for job in list(self.intervals.keys()):
if job.startswith(name):
del self.intervals[job]
if persist:
self.persist()
def add_job(self, data, persist=True):
    '''
    Add a new job to the scheduler. The format is the same as in the
    configuration file; see the docs on how YAML is interpreted into
    python data-structures to build the dictionaries correctly.
    '''
    # Only sanity-check the container here; deeper validation
    # happens when the job is evaluated
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')
    # A job without an explicit 'enabled' flag defaults to enabled
    for job in data:
        data[job].setdefault('enabled', True)
    new_job = next(six.iterkeys(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    else:
        if new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
        else:
            log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Turn on a single job in the scheduler. Pillar jobs are ignored.
    '''
    jobs = self.opts['schedule']
    # The job must already exist before it can be enabled
    if name in jobs:
        jobs[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Turn off a single job in the scheduler. Pillar jobs are ignored.
    '''
    jobs = self.opts['schedule']
    # The job must already exist before it can be disabled
    if name in jobs:
        jobs[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace an existing job definition with ``schedule``.
    Pillar jobs cannot be modified and only produce a warning.
    '''
    if name in self.opts['schedule']:
        # Drop the old definition (and its interval state) first
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def enable_schedule(self):
    '''
    Turn the scheduler on globally.
    '''
    self.opts['schedule']['enabled'] = True
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally.
    '''
    self.opts['schedule']['enabled'] = False
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Reload the schedule from a saved schedule file.
    '''
    # Discard all cached interval state; it will be rebuilt on eval
    self.intervals = {}
    # Accept either the bare schedule dict or one nested under 'schedule'
    if 'schedule' in schedule:
        schedule = schedule['schedule']
    self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    List the current schedule items.

    where
        'pillar' lists only pillar-defined jobs, 'opts' only
        opts-defined jobs; anything else lists both.
    '''
    # Map the requested source onto _get_schedule keyword arguments
    source_kwargs = {'pillar': {'include_opts': False},
                     'opts': {'include_pillar': False}}.get(where, {})
    schedule = self._get_schedule(**source_kwargs)
    # Fire the listing back to the caller as an event
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk and announce completion.
    '''
    self.persist()
    # Signal listeners that the save finished
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True},
        tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler: skip it at ``data['time']`` and
    run it at ``data['new_time']`` instead. Pillar jobs are ignored.
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Record the original slot as an explicit skip...
        job.setdefault('skip_explicit', [])
        job['skip_explicit'].append({'time': time,
                                     'time_fmt': time_fmt})
        # ...and the replacement slot as an explicit run
        job.setdefault('run_explicit', [])
        job['run_explicit'].append({'time': new_time,
                                    'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip a job at a specific time in the scheduler.
    Pillar jobs are ignored.
    '''
    time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        # Record the slot as an explicit skip on the job definition
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', [])
        job['skip_explicit'].append({'time': time,
                                     'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce completion along with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Fire an event carrying the next fire time for the specified job,
    formatted with ``fmt`` (None when the job has no computed time).
    '''
    _next_fire_time = None
    schedule = self._get_schedule()
    if schedule:
        _next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
    if _next_fire_time:
        _next_fire_time = _next_fire_time.strftime(fmt)
    # Hand the computed time back to the caller as an event
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the schedule item for ``name``, or an empty dict when
    no such job exists.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled function in a dedicated process or thread.

    multiprocessing_enabled
        True when this call runs in a freshly spawned process (the
        process is daemonized and always exits at the end).
    func
        Name of the function to run; looked up in ``self.functions``.
    data
        The schedule item definition driving this run.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    # Skeleton of the return document sent back to the master/job cache
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        # Honor minion blackout mode from pillar or grains: only
        # saltutil.refresh_pillar and whitelisted functions may run.
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # NOTE: this previously used ``key is not 'kwargs'`` -- an
                # identity comparison against a str literal, which relies on
                # CPython interning and raises SyntaxWarning on Python 3.8+.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the function if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the function if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch one scheduled job, honoring ``dry_run`` and the configured
    execution model (background process/thread vs. inline call).

    :param func: list of function names to run for this job
    :param dict data: the schedule item's data dict
    '''
    # Jobs flagged dry_run are logged and skipped entirely.
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        # One sub-task per function name; when 'args' is a list it is a
        # parallel list with one entry per function.
        for i, _func in enumerate(func):
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.enable_schedule
|
python
|
def enable_schedule(self):
    '''
    Enable the scheduler.

    Sets the global ``enabled`` flag in ``self.opts['schedule']`` and
    fires a completion event carrying the freshly merged schedule.
    '''
    self.opts['schedule']['enabled'] = True

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_complete')
|
Enable the scheduler.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L472-L481
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule

    Pass ``new_instance=True`` to obtain a fresh, uncached Schedule
    instead of the shared singleton (the cached singleton is untouched).
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        # NOTE(review): no WeakValueDictionary appears in this code any more;
        # the instance is simply cached on cls.instance below — the comment
        # above looks stale, confirm.
        instance = object.__new__(cls)
        # Real initialization lives in __singleton_init__ so that
        # __init__ (always called) can stay a no-op.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally empty: real initialization happens exactly once in
    # __singleton_init__, invoked from __new__.
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initializer for the singleton (called from __new__).

    :param dict opts: minion/master configuration
    :param functions: loaded execution functions
    :param returners: loaded returners, or an object with a ``loader``
        used to generate them
    :param dict intervals: previously recorded run intervals, if any
    :param cleanup: iterable of job-name prefixes to delete on startup
    :param proxy: proxy-minion object when running under a proxy
    :param bool standalone: skip returner/proc-dir integration when True
    :param utils: loaded utility modules (built from opts when omitted)
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Schedule-wide (global) controls; individual jobs may override them.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either a ready mapping of returners or a loader object.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
    # Timezone offset string used when stamping job metadata.
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments re-supplied to __new__ on unpickling; positionally they
    # map to (opts, functions, returners, intervals, cleanup=None).
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Veto the run if this job already has ``maxrunning`` live instances.

    Sets ``data['run'] = False`` (with skip bookkeeping) when the limit
    is reached; always returns *data*.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Running-job discovery differs between master and minion roles.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only live processes running this same schedule item.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

    Only opts-based jobs are written; pillar jobs and private ``_``
    keys are excluded via _get_schedule(include_pillar=False,
    remove_hidden=True). Write failures are logged, not raised.
    '''
    # Resolve the configuration directory: explicit conf_dir, else the
    # directory of conf_file, else the packaged default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove a job from the opts-based schedule.

    Pillar-provided jobs cannot be deleted; a warning is logged instead.

    :param str name: name of the job to delete
    :param bool persist: write the updated schedule to disk afterwards
    '''
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Announce completion along with the freshly merged schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_delete_complete')

    # Drop any cached interval bookkeeping for the job.
    self.intervals.pop(name, None)

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignores jobs from pillar

    Every opts-based job whose name starts with *name* is removed;
    matching pillar jobs only produce a warning.
    '''
    # ensure job exists, then delete it
    # (list() snapshots the keys so we can delete while iterating)
    for job in list(self.opts['schedule'].keys()):
        if job.startswith(name):
            del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    for job in list(self.intervals.keys()):
        if job.startswith(name):
            del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Adds a new job to the scheduler. The format is the same as required in
    the configuration file. See the docs on how YAML is interpreted into
    python data-structures to make sure, you pass correct dictionaries.

    :param dict data: one-entry dict mapping the job name to its options
    :param bool persist: write the updated schedule to disk afterwards
    :raises ValueError: if *data* is not a dict or holds more than one job
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if not len(data) == 1:
        raise ValueError('You can only schedule one new job at a time.')

    # if enabled is not included in the job,
    # assume job is enabled.
    for job in data:
        if 'enabled' not in data[job]:
            data[job]['enabled'] = True

    new_job = next(six.iterkeys(data))

    # Pillar jobs cannot be shadowed; opts jobs are updated in place.
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)

    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)

    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Turn on a single job in the opts-based schedule.

    Pillar-provided jobs cannot be modified; a warning is logged instead.

    :param str name: name of the job to enable
    :param bool persist: write the updated schedule to disk afterwards
    '''
    opts_schedule = self.opts['schedule']
    if name in opts_schedule:
        opts_schedule[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Announce completion along with the freshly merged schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Turn off a single job in the opts-based schedule.

    Pillar-provided jobs cannot be modified; a warning is logged instead.

    :param str name: name of the job to disable
    :param bool persist: write the updated schedule to disk afterwards
    '''
    opts_schedule = self.opts['schedule']
    if name in opts_schedule:
        opts_schedule[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Announce completion along with the freshly merged schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the definition of an opts-based job.

    Pillar-provided jobs cannot be modified; a warning is logged and
    nothing changes.

    :param str name: name of the job to replace
    :param dict schedule: the new job definition
    :param bool persist: write the updated schedule to disk afterwards
    '''
    if name in self.opts['schedule']:
        # Remove the old definition first (this also fires the
        # delete-complete event and clears cached intervals).
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now

    Looks the job up in the merged schedule, resolves its function
    name(s) from 'function'/'func'/'fun', and launches each one via
    _run_job (honoring the job's own 'run' flag).
    '''
    data = self._get_schedule().get(name, {})

    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        if _func not in self.functions:
            # NOTE(review): an unknown function is only logged here; the
            # job is still handed to _run_job below — confirm intended.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def disable_schedule(self):
    '''
    Globally disable the scheduler and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = False

    # Announce completion along with the freshly merged schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Broadcast the current schedule items over the event bus.

    :param str where: 'pillar' for pillar-only jobs, 'opts' for
        opts-only jobs, anything else for the merged view
    '''
    if where == 'pillar':
        current = self._get_schedule(include_opts=False)
    elif where == 'opts':
        current = self._get_schedule(include_pillar=False)
    else:
        current = self._get_schedule()

    # Fire the schedule back to any listeners.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'schedule': current},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk and announce completion.
    '''
    self.persist()

    # Signal listeners that the save finished.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler.
    Ignores jobs from pillar

    :param str name: job name
    :param dict data: must contain 'time' (occurrence to skip) and
        'new_time' (replacement run time); optional 'time_fmt'
        (default '%Y-%m-%dT%H:%M:%S')
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    # ensure job exists, then disable it
    if name in self.opts['schedule']:
        # Postponing = skip the original occurrence...
        if 'skip_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['skip_explicit'] = []
        self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                            'time_fmt': time_fmt})

        # ...and record an explicit run at the new time.
        if 'run_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['run_explicit'] = []
        self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
                                                           'time_fmt': time_fmt})

    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Mark one occurrence of an opts-based job to be skipped.

    Pillar-provided jobs cannot be modified; a warning is logged instead.

    :param str name: job name
    :param dict data: must contain 'time' (occurrence to skip);
        optional 'time_fmt' (default '%Y-%m-%dT%H:%M:%S')
    '''
    skip_time = data['time']
    skip_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', [])
        job['skip_explicit'].append({'time': skip_time,
                                     'time_fmt': skip_fmt})

    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Announce completion along with the freshly merged schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Publish the next fire time of *name* over the event bus.

    The value is the job's cached ``_next_fire_time`` formatted with
    *fmt*, or None when no time is recorded.
    '''
    fire_time = None
    current = self._get_schedule()
    if current:
        fire_time = current.get(name, {}).get('_next_fire_time', None)
    if fire_time:
        fire_time = fire_time.strftime(fmt)

    # Fire the result back to any listeners.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'next_fire_time': fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the data dict for the named schedule item (empty dict when
    the job does not exist).
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled function call in this thread/process.

    Builds the return payload (jid, args, metadata), optionally records
    the job in the proc-dir jobcache, runs the function, dispatches the
    result to any configured returners, and finally reports back to the
    master over the event bus.

    :param bool multiprocessing_enabled: True when running in a spawned
        subprocess (the process exits when the job finishes)
    :param str func: name of the function to run
    :param dict data: the schedule item's data dict
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This is also needed for the ZeroMQ transport to reset all
        # function context data that could keep the parent's connections.
        # ZeroMQ will hang on polling parent connections from the child
        # process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Daemonize *before* entering the try block below so that the
        # finally section cannot be executed once per forked process.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Split into funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # BUGFIX: was ``key is not 'kwargs'`` — identity comparison
                # against a string literal (relies on CPython interning and
                # raises SyntaxWarning on 3.8+); use equality instead.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)

            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
    '''
    Evaluate and execute the schedule.

    Walks every job in the merged (opts + pillar) schedule, computes each
    job's next fire time from its scheduling options (``seconds``-style
    intervals, ``once``, ``when``, ``cron``), applies the various skip /
    range / splay modifiers, and dispatches jobs that are due via
    ``self._run_job``.

    :param datetime now: Override current time with a datetime object instance
    '''
    log.trace('==== evaluating schedule now %s =====', now)

    loop_interval = self.opts['loop_interval']
    if not isinstance(loop_interval, datetime.timedelta):
        loop_interval = datetime.timedelta(seconds=loop_interval)

    def _splay(splaytime):
        '''
        Calculate splaytime
        '''
        splay_ = None
        if isinstance(splaytime, dict):
            if splaytime['end'] >= splaytime['start']:
                splay_ = random.randint(splaytime['start'],
                                        splaytime['end'])
            else:
                log.error('schedule.handle_func: Invalid Splay, '
                          'end must be larger than start. Ignoring splay.')
        else:
            splay_ = random.randint(1, splaytime)
        return splay_

    def _handle_time_elements(data):
        '''
        Handle schedule item with time elements
        seconds, minutes, hours, days
        '''
        if '_seconds' not in data:
            interval = int(data.get('seconds', 0))
            interval += int(data.get('minutes', 0)) * 60
            interval += int(data.get('hours', 0)) * 3600
            interval += int(data.get('days', 0)) * 86400

            data['_seconds'] = interval

            if not data['_next_fire_time']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

            if interval < self.loop_interval:
                self.loop_interval = interval

        data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

    def _handle_once(data, loop_interval):
        '''
        Handle schedule item with once
        '''
        if data['_next_fire_time']:
            if data['_next_fire_time'] < now - loop_interval or \
                    data['_next_fire_time'] > now and \
                    not data['_splay']:
                data['_continue'] = True

        if not data['_next_fire_time'] and \
                not data['_splay']:
            once = data['once']
            if not isinstance(once, datetime.datetime):
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'],
                                                      once_fmt)
                except (TypeError, ValueError):
                    data['_error'] = ('Date string could not '
                                      'be parsed: {0}, {1}. '
                                      'Ignoring job {2}.'.format(
                                          data['once'],
                                          once_fmt,
                                          data['name']))
                    log.error(data['_error'])
                    return
            data['_next_fire_time'] = once
            data['_next_scheduled_fire_time'] = once
            # If _next_fire_time is less than now, continue
            if once < now - loop_interval:
                data['_continue'] = True

    def _handle_when(data, loop_interval):
        '''
        Handle schedule item with when
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['when'], list):
            _when_data = [data['when']]
        else:
            _when_data = data['when']

        _when = []
        for i in _when_data:
            # 'when' values may be symbolic names resolved through a
            # 'whens' mapping in pillar or grains.
            if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                    i in self.opts['pillar']['whens']):
                if not isinstance(self.opts['pillar']['whens'],
                                  dict):
                    data['_error'] = ('Pillar item "whens" '
                                      'must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['pillar']['whens'][i]
            elif ('whens' in self.opts['grains'] and
                  i in self.opts['grains']['whens']):
                if not isinstance(self.opts['grains']['whens'],
                                  dict):
                    data['_error'] = ('Grain "whens" must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['grains']['whens'][i]
            else:
                when_ = i

            if not isinstance(when_, datetime.datetime):
                try:
                    when_ = dateutil_parser.parse(when_)
                except ValueError:
                    data['_error'] = ('Invalid date string {0}. '
                                      'Ignoring job {1}.'.format(i, data['name']))
                    log.error(data['_error'])
                    return

            _when.append(when_)

        if data['_splay']:
            _when.append(data['_splay'])

        # Sort the list of "whens" from earlier to later schedules
        _when.sort()

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_when):
            if len(_when) > 1:
                if i < now - loop_interval:
                    # Remove all missed schedules except the latest one.
                    # We need it to detect if it was triggered previously.
                    _when.remove(i)

        if _when:
            # Grab the first element, which is the next run time or
            # last scheduled time in the past.
            when = _when[0]

            if when < now - loop_interval and \
                    not data.get('_run', False) and \
                    not data.get('run', False) and \
                    not data['_splay']:
                data['_next_fire_time'] = None
                data['_continue'] = True
                return

            if '_run' not in data:
                # Prevent run of jobs from the past
                data['_run'] = bool(when >= now - loop_interval)

            if not data['_next_fire_time']:
                data['_next_fire_time'] = when
                data['_next_scheduled_fire_time'] = when

            if data['_next_fire_time'] < when and \
                    not run and \
                    not data['_run']:
                data['_next_fire_time'] = when
                data['_run'] = True

        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True

    def _handle_cron(data, loop_interval):
        '''
        Handle schedule item with cron
        '''
        if not _CRON_SUPPORTED:
            data['_error'] = ('Missing python-croniter. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if data['_next_fire_time'] is None:
            # Get next time frame for a "cron" job if it has been never
            # executed before or already executed in the past.
            try:
                data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            except (ValueError, KeyError):
                data['_error'] = ('Invalid cron string. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            # If next job run is scheduled more than 1 minute ahead and
            # configured loop interval is longer than that, we should
            # shorten it to get our job executed closer to the beginning
            # of desired time.
            # FIX: the delta was previously computed as
            # (now - data['_next_fire_time']), which is always negative for
            # a future fire time, so this shortening could never trigger.
            interval = (data['_next_fire_time'] - now).total_seconds()
            if interval >= 60 and interval < self.loop_interval:
                self.loop_interval = interval

    def _handle_run_explicit(data, loop_interval):
        '''
        Handle schedule item with run_explicit
        '''
        _run_explicit = []
        for _run_time in data['run_explicit']:
            if isinstance(_run_time, datetime.datetime):
                _run_explicit.append(_run_time)
            else:
                _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                _run_time['time_fmt']))

        data['run'] = False

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_run_explicit):
            if len(_run_explicit) > 1:
                if i < now - loop_interval:
                    _run_explicit.remove(i)

        if _run_explicit:
            if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                data['run'] = True
                data['_next_fire_time'] = _run_explicit[0]

    def _handle_skip_explicit(data, loop_interval):
        '''
        Handle schedule item with skip_explicit
        '''
        data['run'] = False

        _skip_explicit = []
        for _skip_time in data['skip_explicit']:
            if isinstance(_skip_time, datetime.datetime):
                _skip_explicit.append(_skip_time)
            else:
                _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                 _skip_time['time_fmt']))

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_skip_explicit):
            if i < now - loop_interval:
                _skip_explicit.remove(i)

        if _skip_explicit:
            if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'skip_explicit'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True

    def _handle_skip_during_range(data, loop_interval):
        '''
        Handle schedule item with skip_during_range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['skip_during_range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['skip_during_range']['start']
        end = data['skip_during_range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        # Check to see if we should run the job immediately
        # after the skip_during_range is over
        if 'run_after_skip_range' in data and \
                data['run_after_skip_range']:
            if 'run_explicit' not in data:
                data['run_explicit'] = []
            # Add a run_explicit for immediately after the
            # skip_during_range ends
            _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
            if _run_immediate not in data['run_explicit']:
                data['run_explicit'].append({'time': _run_immediate,
                                             'time_fmt': '%Y-%m-%dT%H:%M:%S'})

        if end > start:
            if start <= now <= end:
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger than '
                              'start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_range(data):
        '''
        Handle schedule item with range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary.'
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['range']['start']
        end = data['range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end.'
                                  ' Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if end > start:
            if 'invert' in data['range'] and data['range']['invert']:
                # Inverted range: run only OUTSIDE [start, end]
                if now <= start or now >= end:
                    data['run'] = True
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['run'] = False
            else:
                if start <= now <= end:
                    data['run'] = True
                else:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'not_in_range'
                        data['run'] = False
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger '
                              'than start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_after(data):
        '''
        Handle schedule item with after
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        after = data['after']
        if not isinstance(after, datetime.datetime):
            after = dateutil_parser.parse(after)

        if after >= now:
            log.debug(
                'After time has not passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'after_not_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _handle_until(data):
        '''
        Handle schedule item with until
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        until = data['until']
        if not isinstance(until, datetime.datetime):
            until = dateutil_parser.parse(until)

        if until <= now:
            log.debug(
                'Until time has passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'until_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _chop_ms(dt):
        '''
        Remove the microseconds from a datetime object
        '''
        return dt - datetime.timedelta(microseconds=dt.microsecond)

    schedule = self._get_schedule()
    if not isinstance(schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    if 'skip_function' in schedule:
        self.skip_function = schedule['skip_function']
    if 'skip_during_range' in schedule:
        self.skip_during_range = schedule['skip_during_range']
    if 'enabled' in schedule:
        self.enabled = schedule['enabled']
    if 'splay' in schedule:
        self.splay = schedule['splay']

    _hidden = ['enabled',
               'skip_function',
               'skip_during_range',
               'splay']
    for job, data in six.iteritems(schedule):

        # Skip anything that is a global setting
        if job in _hidden:
            continue

        # FIX: validate the job value type BEFORE any dict-style access.
        # Previously the '_continue'/'_error' cleanup loop and the
        # data['name'] lookup ran first, raising TypeError for non-dict
        # values instead of logging and skipping the bad entry.
        if not isinstance(data, dict):
            log.error(
                'Scheduled job "%s" should have a dict value, not %s',
                job, type(data)
            )
            continue

        # Clear these out between runs
        for item in ['_continue',
                     '_error',
                     '_enabled',
                     '_skipped',
                     '_skip_reason',
                     '_skipped_time']:
            if item in data:
                del data[item]

        run = False

        if 'name' in data:
            job_name = data['name']
        else:
            job_name = data['name'] = job

        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]
        for _func in func:
            if _func not in self.functions:
                log.info(
                    'Invalid function: %s in scheduled job %s.',
                    _func, job_name
                )

        if '_next_fire_time' not in data:
            data['_next_fire_time'] = None

        if '_splay' not in data:
            data['_splay'] = None

        if 'run_on_start' in data and \
                data['run_on_start'] and \
                '_run_on_start' not in data:
            data['_run_on_start'] = True

        if not now:
            now = datetime.datetime.now()

        # Used for quick lookups when detecting invalid option
        # combinations.
        schedule_keys = set(data.keys())

        time_elements = ('seconds', 'minutes', 'hours', 'days')
        scheduling_elements = ('when', 'cron', 'once')

        invalid_sched_combos = [
            set(i) for i in itertools.combinations(scheduling_elements, 2)
        ]

        if any(i <= schedule_keys for i in invalid_sched_combos):
            log.error(
                'Unable to use "%s" options together. Ignoring.',
                '", "'.join(scheduling_elements)
            )
            continue

        invalid_time_combos = []
        for item in scheduling_elements:
            all_items = itertools.chain([item], time_elements)
            invalid_time_combos.append(
                set(itertools.combinations(all_items, 2)))

        if any(set(x) <= schedule_keys for x in invalid_time_combos):
            log.error(
                'Unable to use "%s" with "%s" options. Ignoring',
                '", "'.join(time_elements),
                '", "'.join(scheduling_elements)
            )
            continue

        if 'run_explicit' in data:
            _handle_run_explicit(data, loop_interval)
            run = data['run']

        if True in [True for item in time_elements if item in data]:
            _handle_time_elements(data)
        elif 'once' in data:
            _handle_once(data, loop_interval)
        elif 'when' in data:
            _handle_when(data, loop_interval)
        elif 'cron' in data:
            _handle_cron(data, loop_interval)
        else:
            continue

        # Something told us to continue, so we continue
        if '_continue' in data and data['_continue']:
            continue

        # An error occurred so we bail out
        if '_error' in data and data['_error']:
            continue

        seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

        # If there is no job specific splay available,
        # grab the global which defaults to None.
        if 'splay' not in data:
            data['splay'] = self.splay

        if 'splay' in data and data['splay']:
            # Got "splay" configured, make decision to run a job based on that
            if not data['_splay']:
                # Try to add "splay" time only if next job fire time is
                # still in the future. We should trigger job run
                # immediately otherwise.
                splay = _splay(data['splay'])
                if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                    log.debug('schedule.handle_func: Adding splay of '
                              '%s seconds to next run.', splay)
                    data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                    if 'when' in data:
                        data['_run'] = True
                else:
                    run = True

            if data['_splay']:
                # The "splay" configuration has been already processed, just use it
                seconds = (data['_splay'] - now).total_seconds()
                if 'when' in data:
                    data['_next_fire_time'] = data['_splay']

        if '_seconds' in data:
            if seconds <= 0:
                run = True
        elif 'when' in data and data['_run']:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                data['_run'] = False
                run = True
        elif 'cron' in data:
            # Reset next scheduled time because it is in the past now,
            # and we should trigger the job run, then wait for the next one.
            if seconds <= 0:
                data['_next_fire_time'] = None
                run = True
        elif 'once' in data:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                run = True
        elif seconds == 0:
            run = True

        if '_run_on_start' in data and data['_run_on_start']:
            run = True
            data['_run_on_start'] = False
        elif run:
            if 'range' in data:
                _handle_range(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            # If there is no job specific skip_during_range available,
            # grab the global which defaults to None.
            if 'skip_during_range' not in data and self.skip_during_range:
                data['skip_during_range'] = self.skip_during_range

            if 'skip_during_range' in data and data['skip_during_range']:
                _handle_skip_during_range(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            if 'skip_explicit' in data:
                _handle_skip_explicit(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            if 'until' in data:
                _handle_until(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

            if 'after' in data:
                _handle_after(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

        # If args is a list and less than the number of functions
        # run is set to False.
        if 'args' in data and isinstance(data['args'], list):
            if len(data['args']) < len(func):
                data['_error'] = ('Number of arguments is less than '
                                  'the number of functions. Ignoring job.')
                log.error(data['_error'])
                run = False

        # If the job item has continue, then we set run to False
        # so the job does not run but we still get the important
        # information calculated, eg. _next_fire_time
        if '_continue' in data and data['_continue']:
            run = False

        # If there is no job specific enabled available,
        # grab the global which defaults to True.
        if 'enabled' not in data:
            data['enabled'] = self.enabled

        # If globally disabled, disable the job
        if not self.enabled:
            data['enabled'] = self.enabled
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        # Job is disabled, set run to False
        if 'enabled' in data and not data['enabled']:
            data['_enabled'] = False
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        miss_msg = ''
        if seconds < 0:
            miss_msg = ' (runtime missed ' \
                       'by {0} seconds)'.format(abs(seconds))

        try:
            if run:
                # Job is disabled, continue
                if 'enabled' in data and not data['enabled']:
                    log.debug('Job: %s is disabled', job_name)
                    data['_skip_reason'] = 'disabled'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    continue

                if 'jid_include' not in data or data['jid_include']:
                    data['jid_include'] = True
                    log.debug('schedule: Job %s was scheduled with jid_include, '
                              'adding to cache (jid_include defaults to True)',
                              job_name)
                    if 'maxrunning' in data:
                        log.debug('schedule: Job %s was scheduled with a max '
                                  'number of %s', job_name, data['maxrunning'])
                    else:
                        log.info('schedule: maxrunning parameter was not specified for '
                                 'job %s, defaulting to 1.', job_name)
                        data['maxrunning'] = 1

                if not self.standalone:
                    data['run'] = run
                    data = self._check_max_running(func,
                                                   data,
                                                   self.opts,
                                                   now)
                    run = data['run']

                # Check run again, just in case _check_max_running
                # set run to False
                if run:
                    log.info('Running scheduled job: %s%s', job_name, miss_msg)
                    self._run_job(func, data)
        finally:
            # Only set _last_run if the job ran
            if run:
                data['_last_run'] = now
            data['_splay'] = None

        if '_seconds' in data:
            if self.standalone:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif '_skipped' in data and data['_skipped']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif run:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Dispatch a scheduled job for execution via ``self.handle_func``.

    :param func: function name(s) to execute; iterated, so expected to be
        a list of names (one process/thread is started per entry).
    :param dict data: the schedule item's data dict for this job.
    '''
    # Honor a job-level dry_run flag: log and do nothing.
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        # One process/thread per function; when 'args' is a list it is
        # distributed positionally across the functions.
        for i, _func in enumerate(func):
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                # Threads are fire-and-forget: no join, jobs overlap the loop
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.reload
|
python
|
def reload(self, schedule):
    '''
    Reload the schedule from saved schedule file.
    '''
    # Reset interval tracking for every previously known job
    self.intervals = {}

    # The persisted file nests jobs under a top-level 'schedule' key;
    # unwrap it if present.
    if 'schedule' in schedule:
        schedule = schedule['schedule']
    if 'schedule' not in self.opts:
        self.opts['schedule'] = {}
    self.opts['schedule'].update(schedule)
|
Reload the schedule from saved schedule file.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L494-L503
| null |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # Real initialization happens here, not in __init__ (which is a
        # no-op so repeated Schedule() calls don't reset singleton state).
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Caller asked for a private instance: return it without
            # touching the shared singleton slot.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    '''
    Intentionally empty: Python calls ``__init__`` on every
    ``Schedule(...)`` invocation, but all real setup is done once in
    ``__singleton_init__`` (invoked from ``__new__``) so the shared
    singleton's state is never re-initialized.
    '''
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initializer for the singleton instance; see ``__new__``.
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    # Fall back to loading utils from opts when none are supplied
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        if hasattr(returners, '__getitem__'):
            # Already a dict-like mapping of returner functions
            self.returners = returners
        else:
            # A lazy loader was passed: materialize its functions dict
            self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        # Purge any jobs matching the given name prefixes at startup
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Return ``data`` with ``data['run']`` forced to False when the number
    of already-running instances of this job has reached ``maxrunning``.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Running-job discovery differs between master and minion
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only live processes for this schedule item
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
    '''
    # Resolve the configuration directory: explicit conf_dir, then the
    # directory of conf_file, then the system-wide default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    # Only locally-defined jobs are persisted, with the hidden
    # (underscore-prefixed) runtime keys stripped out.
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        # Best effort: failure to persist must not kill the scheduler
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove a single job from the scheduler.  Jobs that come from pillar
    cannot be removed here and only produce a warning.
    '''
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Broadcast the updated schedule to listeners
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # Drop any interval bookkeeping for this job
    self.intervals.pop(name, None)

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Delete every scheduler job whose name starts with ``name``.
    Matching jobs defined in pillar cannot be removed and only produce
    a warning.
    '''
    # Drop matching jobs from the local schedule
    for job in [key for key in self.opts['schedule'] if key.startswith(name)]:
        del self.opts['schedule'][job]

    # Pillar-sourced jobs are read-only from here
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Notify listeners with the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # Forget interval bookkeeping for the removed jobs
    for job in [key for key in self.intervals if key.startswith(name)]:
        del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
# if enabled is not included in the job,
# assume job is enabled.
for job in data:
if 'enabled' not in data[job]:
data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
log.warning("Cannot update job %s, it's in the pillar!", new_job)
elif new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled job: %s', new_job)
self.opts['schedule'].update(data)
else:
log.info('Added new job %s to scheduler', new_job)
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True):
    '''
    Enable a job in the scheduler.  Jobs defined in pillar are
    immutable and only trigger a warning.
    '''
    local_jobs = self.opts['schedule']
    if name in local_jobs:
        local_jobs[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Publish the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Disable a job in the scheduler.  Jobs defined in pillar are
    immutable and only trigger a warning.
    '''
    local_jobs = self.opts['schedule']
    if name in local_jobs:
        local_jobs[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Publish the updated schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Modify a job in the scheduler. Ignores jobs from pillar
    '''
    if name in self.opts['schedule']:
        # Drop the old opts-based definition; the replacement is
        # installed below.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        # Pillar-defined jobs are read-only from here.
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now, outside of its normal schedule.

    :param str name: the job's key in the schedule dict.
    '''
    data = self._get_schedule().get(name, {})

    # The function may be declared under any of three equivalent keys.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    # _run_job expects a list of function names.
    if not isinstance(func, list):
        func = [func]

    # Warn about unknown functions but still attempt the run, matching
    # the behavior of the scheduler's eval loop.
    for _func in func:
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

    if 'name' not in data:
        data['name'] = name
    log.info('Running Job: %s', name)

    # Grab run, assume True
    run = data.get('run', True)
    if run:
        # Pass the whole function list: _run_job enumerates `func`.
        # Passing the loop-leaked single item here would make _run_job
        # iterate the characters of the function name string.
        self._run_job(func, data)
def enable_schedule(self):
    '''
    Enable the scheduler.
    '''
    self.opts['schedule']['enabled'] = True

    # Acknowledge the state change, including the resulting schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Disable the scheduler.
    '''
    self.opts['schedule']['enabled'] = False

    # Acknowledge the state change, including the resulting schedule.
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_complete')
def list(self, where):
    '''
    List the current schedule items
    '''
    # Restrict the listing to a single source when requested;
    # any other value lists the merged opts + pillar schedule.
    kwargs = {}
    if where == 'pillar':
        kwargs['include_opts'] = False
    elif where == 'opts':
        kwargs['include_pillar'] = False
    schedule = self._get_schedule(**kwargs)

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Save the current schedule
    '''
    # Persist the in-memory schedule to disk first, then acknowledge.
    self.persist()

    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler.
    Ignores jobs from pillar
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Skip the originally scheduled slot...
        job.setdefault('skip_explicit', []).append(
            {'time': time, 'time_fmt': time_fmt})
        # ...and explicitly run at the new one instead.
        job.setdefault('run_explicit', []).append(
            {'time': new_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Acknowledge completion with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Skip a job at a specific time in the scheduler.
    Ignores jobs from pillar
    '''
    time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        # Record the exact slot to be skipped on the job itself.
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Acknowledge completion with the updated schedule.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Return the next fire time for the specified job
    '''
    schedule = self._get_schedule()
    next_fire = None
    if schedule:
        next_fire = schedule.get(name, {}).get('_next_fire_time', None)
    # Render as a string for the event payload; None means "not scheduled".
    if next_fire:
        next_fire = next_fire.strftime(fmt)

    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': next_fire},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the specified schedule item
    '''
    # Unknown job names yield an empty dict rather than raising.
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a single scheduled function, in this process/thread or as the
    body of a spawned child process.

    :param bool multiprocessing_enabled: True when running inside a spawned
        child process; triggers proctitle/daemonize handling and a hard
        ``sys.exit`` on completion.
    :param str func: name of the execution/runner function to call.
    :param dict data: the schedule item (args, kwargs, returner, metadata,
        jid_include, return_job, ...).
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)

    # Skeleton of the job return document.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            # (use != here: 'is not' on a string literal compares object
            # identity and only works by accident of interning)
            for key, val in six.iteritems(ret):
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []

            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
    '''
    Evaluate and execute the schedule

    Walks every job in the merged opts+pillar schedule, computes whether
    it is due (seconds/once/when/cron, plus splay/range/skip modifiers),
    and dispatches due jobs through ``self._run_job``. Mutates each job's
    ``data`` dict in place with bookkeeping keys (``_next_fire_time``,
    ``_splay``, ``_skipped``, ``_error``, ...).

    :param datetime now: Override current time with a datetime object instance
    '''
    log.trace('==== evaluating schedule now %s =====', now)

    # Normalize loop_interval into a timedelta for date arithmetic below.
    loop_interval = self.opts['loop_interval']
    if not isinstance(loop_interval, datetime.timedelta):
        loop_interval = datetime.timedelta(seconds=loop_interval)

    # NOTE: the nested helpers below are closures over ``now`` (and, for
    # _handle_when, the outer loop variable ``run``); they must stay
    # defined inside eval.
    def _splay(splaytime):
        '''
        Calculate splaytime
        '''
        splay_ = None
        if isinstance(splaytime, dict):
            if splaytime['end'] >= splaytime['start']:
                splay_ = random.randint(splaytime['start'],
                                        splaytime['end'])
            else:
                log.error('schedule.handle_func: Invalid Splay, '
                          'end must be larger than start. Ignoring splay.')
        else:
            splay_ = random.randint(1, splaytime)

        return splay_

    def _handle_time_elements(data):
        '''
        Handle schedule item with time elements
        seconds, minutes, hours, days
        '''
        if '_seconds' not in data:
            # Collapse all time elements into a single interval in seconds.
            interval = int(data.get('seconds', 0))
            interval += int(data.get('minutes', 0)) * 60
            interval += int(data.get('hours', 0)) * 3600
            interval += int(data.get('days', 0)) * 86400

            data['_seconds'] = interval

            if not data['_next_fire_time']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

            # Tighten the scheduler loop so short intervals are not missed.
            if interval < self.loop_interval:
                self.loop_interval = interval

        data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

    def _handle_once(data, loop_interval):
        '''
        Handle schedule item with once
        '''
        if data['_next_fire_time']:
            if data['_next_fire_time'] < now - loop_interval or \
                    data['_next_fire_time'] > now and \
                    not data['_splay']:
                data['_continue'] = True

        if not data['_next_fire_time'] and \
                not data['_splay']:
            once = data['once']
            if not isinstance(once, datetime.datetime):
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'],
                                                      once_fmt)
                except (TypeError, ValueError):
                    data['_error'] = ('Date string could not '
                                      'be parsed: {0}, {1}. '
                                      'Ignoring job {2}.'.format(
                                          data['once'],
                                          once_fmt,
                                          data['name']))
                    log.error(data['_error'])
                    return
            data['_next_fire_time'] = once
            data['_next_scheduled_fire_time'] = once
            # If _next_fire_time is less than now, continue
            if once < now - loop_interval:
                data['_continue'] = True

    def _handle_when(data, loop_interval):
        '''
        Handle schedule item with when
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['when'], list):
            _when_data = [data['when']]
        else:
            _when_data = data['when']

        # Resolve each "when" entry: named entries come from the pillar or
        # grain "whens" lookup tables, otherwise parse the literal value.
        _when = []
        for i in _when_data:
            if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                    i in self.opts['pillar']['whens']):
                if not isinstance(self.opts['pillar']['whens'],
                                  dict):
                    data['_error'] = ('Pillar item "whens" '
                                      'must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['pillar']['whens'][i]
            elif ('whens' in self.opts['grains'] and
                  i in self.opts['grains']['whens']):
                if not isinstance(self.opts['grains']['whens'],
                                  dict):
                    data['_error'] = ('Grain "whens" must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['grains']['whens'][i]
            else:
                when_ = i

            if not isinstance(when_, datetime.datetime):
                try:
                    when_ = dateutil_parser.parse(when_)
                except ValueError:
                    data['_error'] = ('Invalid date string {0}. '
                                      'Ignoring job {1}.'.format(i, data['name']))
                    log.error(data['_error'])
                    return

            _when.append(when_)

        if data['_splay']:
            _when.append(data['_splay'])

        # Sort the list of "whens" from earlier to later schedules
        _when.sort()

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_when):
            if len(_when) > 1:
                if i < now - loop_interval:
                    # Remove all missed schedules except the latest one.
                    # We need it to detect if it was triggered previously.
                    _when.remove(i)

        if _when:
            # Grab the first element, which is the next run time or
            # last scheduled time in the past.
            when = _when[0]

            if when < now - loop_interval and \
                    not data.get('_run', False) and \
                    not data.get('run', False) and \
                    not data['_splay']:
                data['_next_fire_time'] = None
                data['_continue'] = True
                return

            if '_run' not in data:
                # Prevent run of jobs from the past
                data['_run'] = bool(when >= now - loop_interval)

            if not data['_next_fire_time']:
                data['_next_fire_time'] = when

            data['_next_scheduled_fire_time'] = when

            # ``run`` here is the outer eval-loop variable (closure).
            if data['_next_fire_time'] < when and \
                    not run and \
                    not data['_run']:
                data['_next_fire_time'] = when
                data['_run'] = True

        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True

    def _handle_cron(data, loop_interval):
        '''
        Handle schedule item with cron
        '''
        if not _CRON_SUPPORTED:
            data['_error'] = ('Missing python-croniter. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if data['_next_fire_time'] is None:
            # Get next time frame for a "cron" job if it has been never
            # executed before or already executed in the past.
            try:
                data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            except (ValueError, KeyError):
                data['_error'] = ('Invalid cron string. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            # If next job run is scheduled more than 1 minute ahead and
            # configured loop interval is longer than that, we should
            # shorten it to get our job executed closer to the beginning
            # of desired time.
            interval = (now - data['_next_fire_time']).total_seconds()
            if interval >= 60 and interval < self.loop_interval:
                self.loop_interval = interval

    def _handle_run_explicit(data, loop_interval):
        '''
        Handle schedule item with run_explicit
        '''
        # Entries may be datetimes or {'time': ..., 'time_fmt': ...} dicts.
        _run_explicit = []
        for _run_time in data['run_explicit']:
            if isinstance(_run_time, datetime.datetime):
                _run_explicit.append(_run_time)
            else:
                _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                _run_time['time_fmt']))

        data['run'] = False

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_run_explicit):
            if len(_run_explicit) > 1:
                if i < now - loop_interval:
                    _run_explicit.remove(i)

        if _run_explicit:
            if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                data['run'] = True
                data['_next_fire_time'] = _run_explicit[0]

    def _handle_skip_explicit(data, loop_interval):
        '''
        Handle schedule item with skip_explicit
        '''
        data['run'] = False

        # Entries may be datetimes or {'time': ..., 'time_fmt': ...} dicts.
        _skip_explicit = []
        for _skip_time in data['skip_explicit']:
            if isinstance(_skip_time, datetime.datetime):
                _skip_explicit.append(_skip_time)
            else:
                _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                 _skip_time['time_fmt']))

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_skip_explicit):
            if i < now - loop_interval:
                _skip_explicit.remove(i)

        if _skip_explicit:
            if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                # Inside a skip window: either run the configured
                # skip_function instead, or record the skip.
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'skip_explicit'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True

    def _handle_skip_during_range(data, loop_interval):
        '''
        Handle schedule item with skip_explicit
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['skip_during_range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['skip_during_range']['start']
        end = data['skip_during_range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        # Check to see if we should run the job immediately
        # after the skip_during_range is over
        if 'run_after_skip_range' in data and \
                data['run_after_skip_range']:
            if 'run_explicit' not in data:
                data['run_explicit'] = []
            # Add a run_explicit for immediately after the
            # skip_during_range ends
            _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
            if _run_immediate not in data['run_explicit']:
                data['run_explicit'].append({'time': _run_immediate,
                                             'time_fmt': '%Y-%m-%dT%H:%M:%S'})

        if end > start:
            if start <= now <= end:
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger than '
                              'start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_range(data):
        '''
        Handle schedule item with skip_explicit
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary.'
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['range']['start']
        end = data['range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end.'
                                  ' Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if end > start:
            # 'invert' flips the window: run only OUTSIDE [start, end].
            if 'invert' in data['range'] and data['range']['invert']:
                if now <= start or now >= end:
                    data['run'] = True
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['run'] = False
            else:
                if start <= now <= end:
                    data['run'] = True
                else:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'not_in_range'
                        data['run'] = False
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger '
                              'than start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_after(data):
        '''
        Handle schedule item with after
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        after = data['after']
        if not isinstance(after, datetime.datetime):
            after = dateutil_parser.parse(after)

        if after >= now:
            log.debug(
                'After time has not passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'after_not_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _handle_until(data):
        '''
        Handle schedule item with until
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        until = data['until']
        if not isinstance(until, datetime.datetime):
            until = dateutil_parser.parse(until)

        if until <= now:
            log.debug(
                'Until time has passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'until_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _chop_ms(dt):
        '''
        Remove the microseconds from a datetime object
        '''
        return dt - datetime.timedelta(microseconds=dt.microsecond)

    schedule = self._get_schedule()
    if not isinstance(schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    # Pull scheduler-wide settings out of the schedule dict.
    if 'skip_function' in schedule:
        self.skip_function = schedule['skip_function']
    if 'skip_during_range' in schedule:
        self.skip_during_range = schedule['skip_during_range']
    if 'enabled' in schedule:
        self.enabled = schedule['enabled']
    if 'splay' in schedule:
        self.splay = schedule['splay']

    _hidden = ['enabled',
               'skip_function',
               'skip_during_range',
               'splay']
    for job, data in six.iteritems(schedule):

        # Skip anything that is a global setting
        if job in _hidden:
            continue

        # Clear these out between runs
        for item in ['_continue',
                     '_error',
                     '_enabled',
                     '_skipped',
                     '_skip_reason',
                     '_skipped_time']:
            if item in data:
                del data[item]
        run = False

        if 'name' in data:
            job_name = data['name']
        else:
            job_name = data['name'] = job

        # NOTE(review): this isinstance check comes AFTER data has already
        # been indexed/mutated above, so a non-dict value would raise
        # before reaching it — looks like the check should be first.
        if not isinstance(data, dict):
            log.error(
                'Scheduled job "%s" should have a dict value, not %s',
                job_name, type(data)
            )
            continue

        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]
        for _func in func:
            if _func not in self.functions:
                log.info(
                    'Invalid function: %s in scheduled job %s.',
                    _func, job_name
                )

        if '_next_fire_time' not in data:
            data['_next_fire_time'] = None

        if '_splay' not in data:
            data['_splay'] = None

        if 'run_on_start' in data and \
                data['run_on_start'] and \
                '_run_on_start' not in data:
            data['_run_on_start'] = True

        if not now:
            now = datetime.datetime.now()

        # Used for quick lookups when detecting invalid option
        # combinations.
        schedule_keys = set(data.keys())

        time_elements = ('seconds', 'minutes', 'hours', 'days')
        scheduling_elements = ('when', 'cron', 'once')

        invalid_sched_combos = [
            set(i) for i in itertools.combinations(scheduling_elements, 2)
        ]

        if any(i <= schedule_keys for i in invalid_sched_combos):
            log.error(
                'Unable to use "%s" options together. Ignoring.',
                '", "'.join(scheduling_elements)
            )
            continue

        invalid_time_combos = []
        for item in scheduling_elements:
            all_items = itertools.chain([item], time_elements)
            invalid_time_combos.append(
                set(itertools.combinations(all_items, 2)))

        # NOTE(review): each element of invalid_time_combos is a set of
        # 2-tuples, so ``set(x) <= schedule_keys`` compares tuples against
        # key strings and can seemingly never be True — verify intent.
        if any(set(x) <= schedule_keys for x in invalid_time_combos):
            log.error(
                'Unable to use "%s" with "%s" options. Ignoring',
                '", "'.join(time_elements),
                '", "'.join(scheduling_elements)
            )
            continue

        if 'run_explicit' in data:
            _handle_run_explicit(data, loop_interval)
            run = data['run']

        # Dispatch to the handler for the job's scheduling style.
        if True in [True for item in time_elements if item in data]:
            _handle_time_elements(data)
        elif 'once' in data:
            _handle_once(data, loop_interval)
        elif 'when' in data:
            _handle_when(data, loop_interval)
        elif 'cron' in data:
            _handle_cron(data, loop_interval)
        else:
            continue

        # Something told us to continue, so we continue
        if '_continue' in data and data['_continue']:
            continue

        # An error occurred so we bail out
        if '_error' in data and data['_error']:
            continue

        seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

        # If there is no job specific splay available,
        # grab the global which defaults to None.
        if 'splay' not in data:
            data['splay'] = self.splay

        if 'splay' in data and data['splay']:
            # Got "splay" configured, make decision to run a job based on that
            if not data['_splay']:
                # Try to add "splay" time only if next job fire time is
                # still in the future. We should trigger job run
                # immediately otherwise.
                splay = _splay(data['splay'])
                if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                    log.debug('schedule.handle_func: Adding splay of '
                              '%s seconds to next run.', splay)
                    data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                    if 'when' in data:
                        data['_run'] = True
                else:
                    run = True

            if data['_splay']:
                # The "splay" configuration has been already processed, just use it
                seconds = (data['_splay'] - now).total_seconds()
                if 'when' in data:
                    data['_next_fire_time'] = data['_splay']

        # Decide whether the job is due, per scheduling style.
        if '_seconds' in data:
            if seconds <= 0:
                run = True
        elif 'when' in data and data['_run']:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                data['_run'] = False
                run = True
        elif 'cron' in data:
            # Reset next scheduled time because it is in the past now,
            # and we should trigger the job run, then wait for the next one.
            if seconds <= 0:
                data['_next_fire_time'] = None
                run = True
        elif 'once' in data:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                run = True
        elif seconds == 0:
            run = True

        if '_run_on_start' in data and data['_run_on_start']:
            run = True
            data['_run_on_start'] = False
        elif run:
            # The job is due: apply run-window modifiers, each of which
            # may veto the run or substitute the skip_function.
            if 'range' in data:
                _handle_range(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            # If there is no job specific skip_during_range available,
            # grab the global which defaults to None.
            if 'skip_during_range' not in data and self.skip_during_range:
                data['skip_during_range'] = self.skip_during_range

            if 'skip_during_range' in data and data['skip_during_range']:
                _handle_skip_during_range(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            if 'skip_explicit' in data:
                _handle_skip_explicit(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the functiton if passed back
                if 'func' in data:
                    func = data['func']

            if 'until' in data:
                _handle_until(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

            if 'after' in data:
                _handle_after(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

        # If args is a list and less than the number of functions
        # run is set to False.
        if 'args' in data and isinstance(data['args'], list):
            if len(data['args']) < len(func):
                data['_error'] = ('Number of arguments is less than '
                                  'the number of functions. Ignoring job.')
                log.error(data['_error'])
                run = False

        # If the job item has continue, then we set run to False
        # so the job does not run but we still get the important
        # information calculated, eg. _next_fire_time
        if '_continue' in data and data['_continue']:
            run = False

        # If there is no job specific enabled available,
        # grab the global which defaults to True.
        if 'enabled' not in data:
            data['enabled'] = self.enabled

        # If globally disabled, disable the job
        if not self.enabled:
            data['enabled'] = self.enabled
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        # Job is disabled, set run to False
        if 'enabled' in data and not data['enabled']:
            data['_enabled'] = False
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        miss_msg = ''
        if seconds < 0:
            miss_msg = ' (runtime missed ' \
                       'by {0} seconds)'.format(abs(seconds))

        try:
            if run:
                # Job is disabled, continue
                if 'enabled' in data and not data['enabled']:
                    log.debug('Job: %s is disabled', job_name)
                    data['_skip_reason'] = 'disabled'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    continue

                if 'jid_include' not in data or data['jid_include']:
                    data['jid_include'] = True
                    log.debug('schedule: Job %s was scheduled with jid_include, '
                              'adding to cache (jid_include defaults to True)',
                              job_name)
                    if 'maxrunning' in data:
                        log.debug('schedule: Job %s was scheduled with a max '
                                  'number of %s', job_name, data['maxrunning'])
                    else:
                        log.info('schedule: maxrunning parameter was not specified for '
                                 'job %s, defaulting to 1.', job_name)
                        data['maxrunning'] = 1

                if not self.standalone:
                    data['run'] = run
                    data = self._check_max_running(func,
                                                   data,
                                                   self.opts,
                                                   now)
                    run = data['run']

                # Check run again, just in case _check_max_running
                # set run to False
                if run:
                    log.info('Running scheduled job: %s%s', job_name, miss_msg)
                    self._run_job(func, data)
        finally:
            # Only set _last_run if the job ran
            if run:
                data['_last_run'] = now
            data['_splay'] = None
            # Interval jobs: roll _next_fire_time forward once the slot
            # has been handled (ran or was explicitly skipped).
            if '_seconds' in data:
                if self.standalone:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif '_skipped' in data and data['_skipped']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif run:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch a scheduled job in a background process (or thread when
    multiprocessing is disabled).  If ``dry_run`` is set the job is
    skipped; if ``run_schedule_jobs_in_background`` is False the job is
    executed synchronously in this process instead.

    NOTE(review): the loop below iterates ``func`` with ``enumerate()``
    and splits a list-valued ``data['args']`` per entry, so ``func`` is
    presumably a list of function names (one spawn per entry) -- confirm
    callers never pass a bare string, which would iterate per character.
    '''
    # Jobs flagged as a dry run are logged and never executed.
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return
    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}
    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread
        for i, _func in enumerate(func):
            # Each entry gets its own copy of the job data; a list-valued
            # 'args' is split positionally across the functions.
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.list
|
python
|
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete')
|
List the current schedule items
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L505-L519
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.  When ``new_instance`` is
    True, a fresh, non-cached instance is built and returned without
    touching the class-level singleton.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # Real initialization happens here (not in __init__, which must
        # stay empty for the singleton pattern).
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Private instance requested: do not cache it on the class.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally empty: Schedule is a singleton, so __init__ runs on
    # every Schedule(...) call; the real one-time setup happens in
    # __singleton_init__ (invoked from __new__).
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initialization for the singleton instance.

    :param dict opts: minion/master configuration
    :param functions: loaded execution (or runner) functions
    :param returners: loaded returner functions (skipped in standalone mode)
    :param dict intervals: pre-seeded interval tracking, if any
    :param cleanup: iterable of job-name prefixes to delete at startup
    :param proxy: proxy-minion object, when scheduling for a proxy
    :param bool standalone: when True, skip returner wiring and proc-dir cleanup
    :param utils: loaded utility modules; regenerated via salt.loader when not given
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either a dict-like returner mapping or a loader object.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
    # Minion's UTC offset, used to stamp job metadata (_TOS).
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Pickle support: the arguments re-passed to __new__ on unpickling.
    # utils is intentionally dropped (None) and rebuilt by
    # __singleton_init__.
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts currently-running jobs that carry the same schedule name and,
    when ``maxrunning`` or more are already active, records the skip
    (``_skip_reason``/``_skipped``/``_skipped_time``) and forces
    ``data['run']`` to False.  Returns the (possibly modified) ``data``
    dict.  Jobs with ``jid_include`` set to False are never limited.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Running-job discovery differs between master and minion roles.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only jobs from this schedule entry whose recorded
                # PID is still alive.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Write the opts-based schedule (pillar entries excluded, internal
    ``_``-prefixed keys stripped) to
    ``<configdir>/<default_include dir>/_schedule.conf`` as YAML.
    Failures to write are logged, not raised.
    '''
    # Resolve the configuration directory: conf_dir, then the directory
    # of conf_file, then the system-wide default.
    conf_dir = self.opts.get('conf_dir', None)
    if conf_dir is None:
        if 'conf_file' in self.opts:
            conf_dir = os.path.dirname(self.opts['conf_file'])
        else:
            conf_dir = salt.syspaths.CONFIG_DIR
    default_include = self.opts.get(
        'default_include',
        salt.config.DEFAULT_MINION_OPTS['default_include'])
    include_dir = os.path.join(conf_dir, os.path.dirname(default_include))
    if not os.path.isdir(include_dir):
        os.makedirs(include_dir)
    target = os.path.join(include_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    to_write = self._get_schedule(include_pillar=False,
                                  remove_hidden=True)
    try:
        with salt.utils.files.fopen(target, 'wb+') as handle:
            handle.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump({'schedule': to_write})))
    except (IOError, OSError):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove ``name`` from the opts schedule and drop its interval
    bookkeeping.  Pillar-defined jobs cannot be deleted (a warning is
    logged).  A completion event carrying the updated schedule is fired
    either way.
    '''
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)
    # Let listeners know the deletion request was processed.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_delete_complete')
    # Forget any interval tracking for the job.
    self.intervals.pop(name, None)
    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every opts-schedule job whose name starts with ``name`` and
    drop the matching interval bookkeeping.  Pillar-defined jobs are
    reported but never removed.  Fires a completion event with the
    updated schedule.
    '''
    for job_id in [job for job in self.opts['schedule'] if job.startswith(name)]:
        del self.opts['schedule'][job_id]
    for job_id in self._get_schedule(include_opts=False):
        if job_id.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job_id)
    # Notify listeners of the (possibly partial) deletion.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_delete_complete')
    for job_id in [job for job in self.intervals if job.startswith(name)]:
        del self.intervals[job_id]
    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Add (or update) a single job in the opts schedule.  ``data`` must be
    a dict with exactly one top-level key: the job name mapped to its
    configuration, in the same format used in the configuration file.
    Jobs defined in pillar cannot be updated here.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')
    # A job without an explicit 'enabled' flag is considered enabled.
    for job in data:
        if 'enabled' not in data[job]:
            data[job]['enabled'] = True
    new_job = next(iter(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    else:
        if new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
        else:
            log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Notify listeners with the refreshed schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark the opts-schedule job ``name`` as enabled.  Pillar-defined
    jobs cannot be modified.  Fires a completion event with the updated
    schedule.
    '''
    if name in self.opts['schedule']:
        log.info('Enabling job %s in scheduler', name)
        self.opts['schedule'][name]['enabled'] = True
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark the opts-schedule job ``name`` as disabled.  Pillar-defined
    jobs cannot be modified.  Fires a completion event with the updated
    schedule.
    '''
    if name in self.opts['schedule']:
        log.info('Disabling job %s in scheduler', name)
        self.opts['schedule'][name]['enabled'] = False
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
'''
Modify a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then replace it
if name in self.opts['schedule']:
self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
return
self.opts['schedule'][name] = schedule
if persist:
self.persist()
def run_job(self, name):
    '''
    Run the schedule job ``name`` immediately.

    The job's callable may be declared under 'function', 'func', or
    'fun', and may be a single name or a list of names.
    '''
    data = self._get_schedule().get(name, {})
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        if _func not in self.functions:
            # NOTE(review): an unknown function is only logged -- the job
            # is still dispatched below; confirm that is intentional.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )
        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)
        # Grab run, assume True
        run = data.get('run', True)
        if run:
            # NOTE(review): _run_job iterates its 'func' argument, but a
            # single name string is passed here -- verify against upstream.
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and fire a completion event carrying
    the current schedule.
    '''
    self.opts['schedule']['enabled'] = True
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and fire a completion event carrying
    the current schedule.
    '''
    self.opts['schedule']['enabled'] = False
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def save_schedule(self):
    '''
    Persist the current schedule to disk, then fire a completion event.
    '''
    self.persist()
    notifier = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    notifier.fire_event({'complete': True},
                        tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a single run of an opts-schedule job: the run at
    ``data['time']`` is skipped and an explicit run is queued at
    ``data['new_time']`` instead.  Pillar-defined jobs cannot be
    modified.  Fires a completion event with the updated schedule.
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': time, 'time_fmt': time_fmt})
        job.setdefault('run_explicit', []).append(
            {'time': new_time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Mark a single upcoming run of opts-schedule job ``name`` (at
    ``data['time']``) to be skipped.  Pillar-defined jobs cannot be
    modified.  Fires a completion event with the updated schedule.
    '''
    time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append(
            {'time': time, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Fire an event carrying the next fire time for job ``name``,
    formatted with ``fmt`` (None when no run is scheduled).
    '''
    _next_fire_time = None
    schedule = self._get_schedule()
    if schedule:
        _next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
    if _next_fire_time:
        _next_fire_time = _next_fire_time.strftime(fmt)
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
                         tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a scheduled job in this (possibly forked) process or thread.

    Builds the job's return payload, honors minion blackout, records the
    job in the proc dir, runs ``func`` with the job's args/kwargs, hands
    the result to any configured returners, fires the
    ``__schedule_return`` event back to the master/minion, and removes
    the proc file.  When ``multiprocessing_enabled`` is True the process
    exits at the end.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    # Base return payload sent back to the master / returners.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        # Proc file used by the job cache / running-job discovery.
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # FIX: was `key is not 'kwargs'` -- identity comparison
                # against a str literal only works via CPython interning
                # and raises SyntaxWarning on 3.8+; use inequality.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
'''
Handle schedule item with until
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
until = data['until']
if not isinstance(until, datetime.datetime):
until = dateutil_parser.parse(until)
if until <= now:
log.debug(
'Until time has passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'until_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
job_dry_run = data.get('dry_run', False)
if job_dry_run:
log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
return
multiprocessing_enabled = self.opts.get('multiprocessing', True)
run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
if run_schedule_jobs_in_background is False:
# Explicitly pass False for multiprocessing_enabled
self.handle_func(False, func, data)
return
if multiprocessing_enabled and salt.utils.platform.is_windows():
# Temporarily stash our function references.
# You can't pickle function references, and pickling is
# required when spawning new processes on Windows.
functions = self.functions
self.functions = {}
returners = self.returners
self.returners = {}
utils = self.utils
self.utils = {}
try:
if multiprocessing_enabled:
thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
else:
thread_cls = threading.Thread
for i, _func in enumerate(func):
_data = copy.deepcopy(data)
if 'args' in _data and isinstance(_data['args'], list):
_data['args'] = _data['args'][i]
if multiprocessing_enabled:
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
proc.start()
proc.join()
else:
proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
proc.start()
finally:
if multiprocessing_enabled and salt.utils.platform.is_windows():
# Restore our function references.
self.functions = functions
self.returners = returners
self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.save_schedule
|
python
|
def save_schedule(self):
'''
Save the current schedule
'''
self.persist()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True},
tag='/salt/minion/minion_schedule_saved')
|
Save the current schedule
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L521-L530
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def persist(self):\n '''\n Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf\n '''\n config_dir = self.opts.get('conf_dir', None)\n if config_dir is None and 'conf_file' in self.opts:\n config_dir = os.path.dirname(self.opts['conf_file'])\n if config_dir is None:\n config_dir = salt.syspaths.CONFIG_DIR\n\n minion_d_dir = os.path.join(\n config_dir,\n os.path.dirname(self.opts.get('default_include',\n salt.config.DEFAULT_MINION_OPTS['default_include'])))\n\n if not os.path.isdir(minion_d_dir):\n os.makedirs(minion_d_dir)\n\n schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')\n log.debug('Persisting schedule')\n schedule_data = self._get_schedule(include_pillar=False,\n remove_hidden=True)\n try:\n with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:\n fp_.write(\n salt.utils.stringutils.to_bytes(\n salt.utils.yaml.safe_dump(\n {'schedule': schedule_data}\n )\n )\n )\n except (IOError, OSError):\n log.error('Failed to persist the updated schedule',\n exc_info_on_loglevel=logging.DEBUG)\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
returners=None,
intervals=None,
cleanup=None,
proxy=None,
standalone=False,
new_instance=False,
utils=None):
'''
Only create one instance of Schedule
'''
if cls.instance is None or new_instance is True:
log.debug('Initializing new Schedule')
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
instance = object.__new__(cls)
instance.__singleton_init__(opts, functions,
returners=returners,
intervals=intervals,
cleanup=cleanup,
proxy=proxy,
standalone=standalone,
utils=utils)
if new_instance is True:
return instance
cls.instance = instance
else:
log.debug('Re-using Schedule')
return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
returners=None,
intervals=None,
cleanup=None,
proxy=None,
standalone=False,
new_instance=False,
utils=None):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
functions,
returners=None,
intervals=None,
cleanup=None,
proxy=None,
standalone=False,
utils=None):
self.opts = opts
self.proxy = proxy
self.functions = functions
self.utils = utils or salt.loader.utils(opts)
self.standalone = standalone
self.skip_function = None
self.skip_during_range = None
self.splay = None
self.enabled = True
if isinstance(intervals, dict):
self.intervals = intervals
else:
self.intervals = {}
if not self.standalone:
if hasattr(returners, '__getitem__'):
self.returners = returners
else:
self.returners = returners.loader.gen_functions()
self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
self.schedule_returner = self.option('schedule_returner')
# Keep track of the lowest loop interval needed in this variable
self.loop_interval = six.MAXSIZE
if not self.standalone:
clean_proc_dir(opts)
if cleanup:
for prefix in cleanup:
self.delete_job_prefix(prefix)
def __getnewargs__(self):
return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
'''
Return the schedule data structure
'''
# Check to see if there are other jobs with this
# signature running. If there are more than maxrunning
# jobs present then don't start another.
# If jid_include is False for this job we can ignore all this
# NOTE--jid_include defaults to True, thus if it is missing from the data
# dict we treat it like it was there and is True
# Check if we're able to run
if not data['run']:
return data
if 'jid_include' not in data or data['jid_include']:
jobcount = 0
if self.opts['__role'] == 'master':
current_jobs = salt.utils.master.get_running_jobs(self.opts)
else:
current_jobs = salt.utils.minion.running(self.opts)
for job in current_jobs:
if 'schedule' in job:
log.debug(
'schedule.handle_func: Checking job against fun '
'%s: %s', func, job
)
if data['name'] == job['schedule'] \
and salt.utils.process.os_is_running(job['pid']):
jobcount += 1
log.debug(
'schedule.handle_func: Incrementing jobcount, '
'now %s, maxrunning is %s',
jobcount, data['maxrunning']
)
if jobcount >= data['maxrunning']:
log.debug(
'schedule.handle_func: The scheduled job '
'%s was not started, %s already running',
data['name'], data['maxrunning']
)
data['_skip_reason'] = 'maxrunning'
data['_skipped'] = True
data['_skipped_time'] = now
data['run'] = False
return data
return data
def persist(self):
'''
Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
'''
config_dir = self.opts.get('conf_dir', None)
if config_dir is None and 'conf_file' in self.opts:
config_dir = os.path.dirname(self.opts['conf_file'])
if config_dir is None:
config_dir = salt.syspaths.CONFIG_DIR
minion_d_dir = os.path.join(
config_dir,
os.path.dirname(self.opts.get('default_include',
salt.config.DEFAULT_MINION_OPTS['default_include'])))
if not os.path.isdir(minion_d_dir):
os.makedirs(minion_d_dir)
schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
log.debug('Persisting schedule')
schedule_data = self._get_schedule(include_pillar=False,
remove_hidden=True)
try:
with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
fp_.write(
salt.utils.stringutils.to_bytes(
salt.utils.yaml.safe_dump(
{'schedule': schedule_data}
)
)
)
except (IOError, OSError):
log.error('Failed to persist the updated schedule',
exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignore jobs from pillar
'''
# ensure job exists, then delete it
if name in self.opts['schedule']:
del self.opts['schedule'][name]
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot delete job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
if name in self.intervals:
del self.intervals[name]
if persist:
self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
'''
Deletes a job from the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then delete it
for job in list(self.opts['schedule'].keys()):
if job.startswith(name):
del self.opts['schedule'][job]
for job in self._get_schedule(include_opts=False):
if job.startswith(name):
log.warning("Cannot delete job %s, it's in the pillar!", job)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_delete_complete')
# remove from self.intervals
for job in list(self.intervals.keys()):
if job.startswith(name):
del self.intervals[job]
if persist:
self.persist()
def add_job(self, data, persist=True):
'''
Adds a new job to the scheduler. The format is the same as required in
the configuration file. See the docs on how YAML is interpreted into
python data-structures to make sure, you pass correct dictionaries.
'''
# we don't do any checking here besides making sure its a dict.
# eval() already does for us and raises errors accordingly
if not isinstance(data, dict):
raise ValueError('Scheduled jobs have to be of type dict.')
if not len(data) == 1:
raise ValueError('You can only schedule one new job at a time.')
# if enabled is not included in the job,
# assume job is enabled.
for job in data:
if 'enabled' not in data[job]:
data[job]['enabled'] = True
new_job = next(six.iterkeys(data))
if new_job in self._get_schedule(include_opts=False):
log.warning("Cannot update job %s, it's in the pillar!", new_job)
elif new_job in self.opts['schedule']:
log.info('Updating job settings for scheduled job: %s', new_job)
self.opts['schedule'].update(data)
else:
log.info('Added new job %s to scheduler', new_job)
self.opts['schedule'].update(data)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_add_complete')
if persist:
self.persist()
def enable_job(self, name, persist=True):
'''
Enable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then enable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = True
log.info('Enabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_job_complete')
if persist:
self.persist()
def disable_job(self, name, persist=True):
'''
Disable a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then disable it
if name in self.opts['schedule']:
self.opts['schedule'][name]['enabled'] = False
log.info('Disabling job %s in scheduler', name)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_job_complete')
if persist:
self.persist()
def modify_job(self, name, schedule, persist=True):
'''
Modify a job in the scheduler. Ignores jobs from pillar
'''
# ensure job exists, then replace it
if name in self.opts['schedule']:
self.delete_job(name, persist)
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
return
self.opts['schedule'][name] = schedule
if persist:
self.persist()
def run_job(self, name):
'''
Run a schedule job now
'''
data = self._get_schedule().get(name, {})
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.error(
'Invalid function: %s in scheduled job %s.',
_func, name
)
if 'name' not in data:
data['name'] = name
log.info('Running Job: %s', name)
# Grab run, assume True
run = data.get('run', True)
if run:
self._run_job(_func, data)
def enable_schedule(self):
'''
Enable the scheduler.
'''
self.opts['schedule']['enabled'] = True
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
'''
Disable the scheduler.
'''
self.opts['schedule']['enabled'] = False
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
'''
List the current schedule items
'''
if where == 'pillar':
schedule = self._get_schedule(include_opts=False)
elif where == 'opts':
schedule = self._get_schedule(include_pillar=False)
else:
schedule = self._get_schedule()
# Fire the complete event back along with the list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'schedule': schedule},
tag='/salt/minion/minion_schedule_list_complete')
def postpone_job(self, name, data):
'''
Postpone a job in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
new_time = data['new_time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
if 'run_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['run_explicit'] = []
self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
'''
Skip a job at a specific time in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
'''
Return the next fire time for the specified job
'''
schedule = self._get_schedule()
_next_fire_time = None
if schedule:
_next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
if _next_fire_time:
_next_fire_time = _next_fire_time.strftime(fmt)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
'''
Return the specified schedule item
'''
schedule = self._get_schedule()
return schedule.get(name, {})
    def handle_func(self, multiprocessing_enabled, func, data):
        '''
        Execute this method in a multiprocess or thread

        :param bool multiprocessing_enabled: True when running inside a
            dedicated child process; controls proctitle, daemonizing and
            the final ``sys.exit``.
        :param str func: Dotted name of the execution/runner function to run.
        :param dict data: The schedule item definition (args, kwargs,
            returner, metadata, ...).
        '''
        if salt.utils.platform.is_windows() \
                or self.opts.get('transport') == 'zeromq':
            # Since function references can't be pickled and pickling
            # is required when spawning new processes on Windows, regenerate
            # the functions and returners.
            # This also needed for ZeroMQ transport to reset all functions
            # context data that could keep paretns connections. ZeroMQ will
            # hang on polling parents connections from the child process.
            if self.opts['__role'] == 'master':
                self.functions = salt.loader.runner(self.opts, utils=self.utils)
            else:
                self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
            self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
        # Skeleton of the job-return document sent back to the master.
        ret = {'id': self.opts.get('id', 'master'),
               'fun': func,
               'fun_args': [],
               'schedule': data['name'],
               'jid': salt.utils.jid.gen_jid(self.opts)}

        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
                # Stamp scheduling metadata: offset, ctime and a coarse
                # UTC timestamp.
                ret['metadata']['_TOS'] = self.time_offset
                ret['metadata']['_TS'] = time.ctime()
                ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
            else:
                log.warning('schedule: The metadata parameter must be '
                            'specified as a dictionary. Ignoring.')

        if multiprocessing_enabled:
            # We just want to modify the process name if we're on a different process
            salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

        data_returner = data.get('returner', None)

        if not self.standalone:
            # Path of the proc-dir marker file that advertises this running job.
            proc_fn = os.path.join(
                salt.minion.get_proc_dir(self.opts['cachedir']),
                ret['jid']
            )

        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # Reconfigure multiprocessing logging after daemonizing
            log_setup.setup_multiprocessing_logging()

        if multiprocessing_enabled:
            # Daemonize *BEFORE* entering the try/finally below, otherwise the
            # fork could make the finally section execute multiple times.
            salt.utils.process.daemonize_if(self.opts)

        # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
        try:
            # Blackout mode: only saltutil.refresh_pillar plus an explicit
            # whitelist may run, whether blackout comes from pillar or grains.
            minion_blackout_violation = False
            if self.opts.get('pillar', {}).get('minion_blackout', False):
                whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            elif self.opts.get('grains', {}).get('minion_blackout', False):
                whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
                if func != 'saltutil.refresh_pillar' and func not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            ret['pid'] = os.getpid()

            if not self.standalone:
                if 'jid_include' not in data or data['jid_include']:
                    log.debug(
                        'schedule.handle_func: adding this job to the '
                        'jobcache with data %s', ret
                    )
                    # write this to /var/cache/salt/minion/proc
                    with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                        fp_.write(salt.payload.Serial(self.opts).dumps(ret))

            args = tuple()
            if 'args' in data:
                args = data['args']
                ret['fun_args'].extend(data['args'])

            kwargs = {}
            if 'kwargs' in data:
                kwargs = data['kwargs']
                ret['fun_args'].append(copy.deepcopy(kwargs))

            if func not in self.functions:
                ret['return'] = self.functions.missing_fun_string(func)
                salt.utils.error.raise_error(
                    message=self.functions.missing_fun_string(func))

            # if the func support **kwargs, lets pack in the pub data we have
            # TODO: pack the *same* pub data as a minion?
            argspec = salt.utils.args.get_function_argspec(self.functions[func])
            if argspec.keywords:
                # this function accepts **kwargs, pack in the publish data
                for key, val in six.iteritems(ret):
                    # NOTE(review): 'is not' performs an *identity* comparison
                    # against a str literal; this should read key != 'kwargs'
                    # and only works by CPython string-interning accident.
                    if key is not 'kwargs':
                        kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

            # Only include these when running runner modules
            if self.opts['__role'] == 'master':
                jid = salt.utils.jid.gen_jid(self.opts)
                tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

                event = salt.utils.event.get_event(
                    self.opts['__role'],
                    self.opts['sock_dir'],
                    self.opts['transport'],
                    opts=self.opts,
                    listen=False)

                namespaced_event = salt.utils.event.NamespacedEvent(
                    event,
                    tag,
                    print_func=None
                )

                func_globals = {
                    '__jid__': jid,
                    '__user__': salt.utils.user.get_user(),
                    '__tag__': tag,
                    '__jid_event__': weakref.proxy(namespaced_event),
                }
                self_functions = copy.copy(self.functions)
                salt.utils.lazy.verify_fun(self_functions, func)

                # Inject some useful globals to *all* the function's global
                # namespace only once per module-- not per func
                completed_funcs = []

                for mod_name in six.iterkeys(self_functions):
                    if '.' not in mod_name:
                        continue
                    mod, _ = mod_name.split('.', 1)
                    if mod in completed_funcs:
                        continue
                    completed_funcs.append(mod)
                    for global_key, value in six.iteritems(func_globals):
                        self.functions[mod_name].__globals__[global_key] = value

            self.functions.pack['__context__']['retcode'] = 0
            # The actual job execution.
            ret['return'] = self.functions[func](*args, **kwargs)

            if not self.standalone:
                # runners do not provide retcode
                if 'retcode' in self.functions.pack['__context__']:
                    ret['retcode'] = self.functions.pack['__context__']['retcode']

                ret['success'] = True

                if data_returner or self.schedule_returner:
                    if 'return_config' in data:
                        ret['ret_config'] = data['return_config']
                    if 'return_kwargs' in data:
                        ret['ret_kwargs'] = data['return_kwargs']
                    rets = []
                    for returner in [data_returner, self.schedule_returner]:
                        if isinstance(returner, six.string_types):
                            rets.append(returner)
                        elif isinstance(returner, list):
                            rets.extend(returner)
                    # simple de-duplication with order retained
                    for returner in OrderedDict.fromkeys(rets):
                        ret_str = '{0}.returner'.format(returner)
                        if ret_str in self.returners:
                            self.returners[ret_str](ret)
                        else:
                            log.info(
                                'Job %s using invalid returner: %s. Ignoring.',
                                func, returner
                            )
        except Exception:
            log.exception('Unhandled exception running %s', ret['fun'])
            # Although catch-all exception handlers are bad, the exception here
            # is to let the exception bubble up to the top of the thread context,
            # where the thread will die silently, which is worse.
            if 'return' not in ret:
                ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
            ret['success'] = False
            ret['retcode'] = 254
        finally:
            # Only attempt to return data to the master if the scheduled job is running
            # on a master itself or a minion.
            if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
                # The 'return_job' option is enabled by default even if not set
                if 'return_job' in data and not data['return_job']:
                    pass
                else:
                    # Send back to master so the job is included in the job list
                    mret = ret.copy()
                    # No returners defined, so we're only sending back to the master
                    if not data_returner and not self.schedule_returner:
                        mret['jid'] = 'req'
                        if data.get('return_job') == 'nocache':
                            # overwrite 'req' to signal to master that
                            # this job shouldn't be stored
                            mret['jid'] = 'nocache'
                    load = {'cmd': '_return', 'id': self.opts['id']}
                    for key, value in six.iteritems(mret):
                        load[key] = value

                    if '__role' in self.opts and self.opts['__role'] == 'minion':
                        event = salt.utils.event.get_event('minion',
                                                           opts=self.opts,
                                                           listen=False)
                    elif '__role' in self.opts and self.opts['__role'] == 'master':
                        event = salt.utils.event.get_master_event(self.opts,
                                                                  self.opts['sock_dir'])
                    try:
                        event.fire_event(load, '__schedule_return')
                    except Exception as exc:
                        log.exception('Unhandled exception firing __schedule_return event')

            if not self.standalone:
                # Remove the proc-dir marker now that the job is done.
                log.debug('schedule.handle_func: Removing %s', proc_fn)
                try:
                    os.unlink(proc_fn)
                except OSError as exc:
                    if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                        # EEXIST and ENOENT are OK because the file is gone and that's what
                        # we wanted
                        pass
                    else:
                        log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                        # Otherwise, failing to delete this file is not something
                        # we can cleanly handle.
                        raise
                finally:
                    if multiprocessing_enabled:
                        # Let's make sure we exit the process!
                        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    def eval(self, now=None):
        '''
        Evaluate and execute the schedule

        Walks every job in the merged schedule, computes whether it should
        fire on this pass, and dispatches due jobs via ``_run_job``.

        :param datetime now: Override current time with a datetime object instance
        '''
        log.trace('==== evaluating schedule now %s =====', now)

        loop_interval = self.opts['loop_interval']
        if not isinstance(loop_interval, datetime.timedelta):
            loop_interval = datetime.timedelta(seconds=loop_interval)

        def _splay(splaytime):
            '''
            Calculate splaytime

            Returns a random number of seconds, either within the
            start/end range of a dict or between 1 and a scalar splaytime.
            '''
            splay_ = None
            if isinstance(splaytime, dict):
                if splaytime['end'] >= splaytime['start']:
                    splay_ = random.randint(splaytime['start'],
                                            splaytime['end'])
                else:
                    log.error('schedule.handle_func: Invalid Splay, '
                              'end must be larger than start. Ignoring splay.')
            else:
                splay_ = random.randint(1, splaytime)
            return splay_

        def _handle_time_elements(data):
            '''
            Handle schedule item with time elements
            seconds, minutes, hours, days
            '''
            if '_seconds' not in data:
                # Collapse the individual time elements into one interval.
                interval = int(data.get('seconds', 0))
                interval += int(data.get('minutes', 0)) * 60
                interval += int(data.get('hours', 0)) * 3600
                interval += int(data.get('days', 0)) * 86400

                data['_seconds'] = interval

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

                # Tighten the scheduler loop so short intervals are not missed.
                if interval < self.loop_interval:
                    self.loop_interval = interval

            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

        def _handle_once(data, loop_interval):
            '''
            Handle schedule item with once
            '''
            if data['_next_fire_time']:
                if data['_next_fire_time'] < now - loop_interval or \
                        data['_next_fire_time'] > now and \
                        not data['_splay']:
                    data['_continue'] = True

            if not data['_next_fire_time'] and \
                    not data['_splay']:
                once = data['once']
                if not isinstance(once, datetime.datetime):
                    once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                    try:
                        once = datetime.datetime.strptime(data['once'],
                                                          once_fmt)
                    except (TypeError, ValueError):
                        data['_error'] = ('Date string could not '
                                          'be parsed: {0}, {1}. '
                                          'Ignoring job {2}.'.format(
                                              data['once'],
                                              once_fmt,
                                              data['name']))
                        log.error(data['_error'])
                        return
                data['_next_fire_time'] = once
                data['_next_scheduled_fire_time'] = once
                # If _next_fire_time is less than now, continue
                if once < now - loop_interval:
                    data['_continue'] = True

        def _handle_when(data, loop_interval):
            '''
            Handle schedule item with when
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['when'], list):
                _when_data = [data['when']]
            else:
                _when_data = data['when']

            _when = []
            for i in _when_data:
                # A "when" entry may name an alias defined in the pillar or
                # grains 'whens' mapping instead of a literal date string.
                if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                        i in self.opts['pillar']['whens']):
                    if not isinstance(self.opts['pillar']['whens'],
                                      dict):
                        data['_error'] = ('Pillar item "whens" '
                                          'must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['pillar']['whens'][i]
                elif ('whens' in self.opts['grains'] and
                      i in self.opts['grains']['whens']):
                    if not isinstance(self.opts['grains']['whens'],
                                      dict):
                        data['_error'] = ('Grain "whens" must be a dict. '
                                          'Ignoring job {0}.'.format(data['name']))
                        log.error(data['_error'])
                        return
                    when_ = self.opts['grains']['whens'][i]
                else:
                    when_ = i

                if not isinstance(when_, datetime.datetime):
                    try:
                        when_ = dateutil_parser.parse(when_)
                    except ValueError:
                        data['_error'] = ('Invalid date string {0}. '
                                          'Ignoring job {1}.'.format(i, data['name']))
                        log.error(data['_error'])
                        return

                _when.append(when_)

            if data['_splay']:
                _when.append(data['_splay'])

            # Sort the list of "whens" from earlier to later schedules
            _when.sort()

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_when):
                if len(_when) > 1:
                    if i < now - loop_interval:
                        # Remove all missed schedules except the latest one.
                        # We need it to detect if it was triggered previously.
                        _when.remove(i)

            if _when:
                # Grab the first element, which is the next run time or
                # last scheduled time in the past.
                when = _when[0]

                if when < now - loop_interval and \
                        not data.get('_run', False) and \
                        not data.get('run', False) and \
                        not data['_splay']:
                    data['_next_fire_time'] = None
                    data['_continue'] = True
                    return

                if '_run' not in data:
                    # Prevent run of jobs from the past
                    data['_run'] = bool(when >= now - loop_interval)

                if not data['_next_fire_time']:
                    data['_next_fire_time'] = when

                data['_next_scheduled_fire_time'] = when

                # NOTE(review): 'run' here is read from eval()'s enclosing
                # scope (the per-job loop variable), not a local -- confirm
                # that closure is intentional.
                if data['_next_fire_time'] < when and \
                        not run and \
                        not data['_run']:
                    data['_next_fire_time'] = when
                    data['_run'] = True

            elif not data.get('_run', False):
                data['_next_fire_time'] = None
                data['_continue'] = True

        def _handle_cron(data, loop_interval):
            '''
            Handle schedule item with cron
            '''
            if not _CRON_SUPPORTED:
                data['_error'] = ('Missing python-croniter. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if data['_next_fire_time'] is None:
                # Get next time frame for a "cron" job if it has been never
                # executed before or already executed in the past.
                try:
                    data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                    data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                except (ValueError, KeyError):
                    data['_error'] = ('Invalid cron string. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

                # If next job run is scheduled more than 1 minute ahead and
                # configured loop interval is longer than that, we should
                # shorten it to get our job executed closer to the beginning
                # of desired time.
                # NOTE(review): since _next_fire_time is in the future,
                # (now - _next_fire_time) is negative, so this condition
                # looks like it can never be true -- operands may be swapped.
                interval = (now - data['_next_fire_time']).total_seconds()
                if interval >= 60 and interval < self.loop_interval:
                    self.loop_interval = interval

        def _handle_run_explicit(data, loop_interval):
            '''
            Handle schedule item with run_explicit
            '''
            _run_explicit = []
            for _run_time in data['run_explicit']:
                if isinstance(_run_time, datetime.datetime):
                    _run_explicit.append(_run_time)
                else:
                    _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                    _run_time['time_fmt']))

            data['run'] = False

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_run_explicit):
                if len(_run_explicit) > 1:
                    if i < now - loop_interval:
                        _run_explicit.remove(i)

            if _run_explicit:
                if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                    data['run'] = True
                    data['_next_fire_time'] = _run_explicit[0]

        def _handle_skip_explicit(data, loop_interval):
            '''
            Handle schedule item with skip_explicit
            '''
            data['run'] = False

            _skip_explicit = []
            for _skip_time in data['skip_explicit']:
                if isinstance(_skip_time, datetime.datetime):
                    _skip_explicit.append(_skip_time)
                else:
                    _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                     _skip_time['time_fmt']))

            # Copy the list so we can loop through it
            for i in copy.deepcopy(_skip_explicit):
                if i < now - loop_interval:
                    _skip_explicit.remove(i)

            if _skip_explicit:
                if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                    # Either hand the slot to the configured skip_function
                    # or record why the job was skipped.
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'skip_explicit'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True

        def _handle_skip_during_range(data, loop_interval):
            '''
            Handle schedule item with skip_during_range
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['skip_during_range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['skip_during_range']['start']
            end = data['skip_during_range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end in '
                                      'skip_during_range. Ignoring '
                                      'job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            # Check to see if we should run the job immediately
            # after the skip_during_range is over
            if 'run_after_skip_range' in data and \
                    data['run_after_skip_range']:
                if 'run_explicit' not in data:
                    data['run_explicit'] = []
                # Add a run_explicit for immediately after the
                # skip_during_range ends
                _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
                if _run_immediate not in data['run_explicit']:
                    data['run_explicit'].append({'time': _run_immediate,
                                                 'time_fmt': '%Y-%m-%dT%H:%M:%S'})

            if end > start:
                if start <= now <= end:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        data['run'] = False
                else:
                    data['run'] = True
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger than '
                                  'start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])

        def _handle_range(data):
            '''
            Handle schedule item with range
            '''
            if not _RANGE_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            if not isinstance(data['range'], dict):
                data['_error'] = ('schedule.handle_func: Invalid, range '
                                  'must be specified as a dictionary.'
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            start = data['range']['start']
            end = data['range']['end']
            if not isinstance(start, datetime.datetime):
                try:
                    start = dateutil_parser.parse(start)
                except ValueError:
                    data['_error'] = ('Invalid date string for start. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if not isinstance(end, datetime.datetime):
                try:
                    end = dateutil_parser.parse(end)
                except ValueError:
                    data['_error'] = ('Invalid date string for end.'
                                      ' Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return

            if end > start:
                if 'invert' in data['range'] and data['range']['invert']:
                    # Inverted range: run only *outside* [start, end].
                    if now <= start or now >= end:
                        data['run'] = True
                    else:
                        data['_skip_reason'] = 'in_skip_range'
                        data['run'] = False
                else:
                    if start <= now <= end:
                        data['run'] = True
                    else:
                        if self.skip_function:
                            data['run'] = True
                            data['func'] = self.skip_function
                        else:
                            data['_skip_reason'] = 'not_in_range'
                            data['run'] = False
            else:
                data['_error'] = ('schedule.handle_func: Invalid '
                                  'range, end must be larger '
                                  'than start. Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])

        def _handle_after(data):
            '''
            Handle schedule item with after
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            after = data['after']
            if not isinstance(after, datetime.datetime):
                after = dateutil_parser.parse(after)

            if after >= now:
                log.debug(
                    'After time has not passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'after_not_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True

        def _handle_until(data):
            '''
            Handle schedule item with until
            '''
            if not _WHEN_SUPPORTED:
                data['_error'] = ('Missing python-dateutil. '
                                  'Ignoring job {0}'.format(data['name']))
                log.error(data['_error'])
                return

            until = data['until']
            if not isinstance(until, datetime.datetime):
                until = dateutil_parser.parse(until)

            if until <= now:
                log.debug(
                    'Until time has passed skipping job: %s.',
                    data['name']
                )
                data['_skip_reason'] = 'until_passed'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
            else:
                data['run'] = True

        def _chop_ms(dt):
            '''
            Remove the microseconds from a datetime object
            '''
            return dt - datetime.timedelta(microseconds=dt.microsecond)

        schedule = self._get_schedule()
        if not isinstance(schedule, dict):
            raise ValueError('Schedule must be of type dict.')
        # Pull the global settings out of the schedule document.
        if 'skip_function' in schedule:
            self.skip_function = schedule['skip_function']
        if 'skip_during_range' in schedule:
            self.skip_during_range = schedule['skip_during_range']
        if 'enabled' in schedule:
            self.enabled = schedule['enabled']
        if 'splay' in schedule:
            self.splay = schedule['splay']

        _hidden = ['enabled',
                   'skip_function',
                   'skip_during_range',
                   'splay']
        for job, data in six.iteritems(schedule):

            # Skip anything that is a global setting
            if job in _hidden:
                continue

            # Clear these out between runs
            for item in ['_continue',
                         '_error',
                         '_enabled',
                         '_skipped',
                         '_skip_reason',
                         '_skipped_time']:
                if item in data:
                    del data[item]
            run = False

            if 'name' in data:
                job_name = data['name']
            else:
                job_name = data['name'] = job

            # NOTE(review): data has already been indexed above, so a
            # non-dict value would raise before reaching this guard --
            # consider moving this isinstance check to the top of the loop.
            if not isinstance(data, dict):
                log.error(
                    'Scheduled job "%s" should have a dict value, not %s',
                    job_name, type(data)
                )
                continue

            # The function to run may be declared under any of three keys.
            if 'function' in data:
                func = data['function']
            elif 'func' in data:
                func = data['func']
            elif 'fun' in data:
                func = data['fun']
            else:
                func = None
            if not isinstance(func, list):
                func = [func]
            for _func in func:
                if _func not in self.functions:
                    log.info(
                        'Invalid function: %s in scheduled job %s.',
                        _func, job_name
                    )

            if '_next_fire_time' not in data:
                data['_next_fire_time'] = None

            if '_splay' not in data:
                data['_splay'] = None

            if 'run_on_start' in data and \
                    data['run_on_start'] and \
                    '_run_on_start' not in data:
                data['_run_on_start'] = True

            if not now:
                now = datetime.datetime.now()

            # Used for quick lookups when detecting invalid option
            # combinations.
            schedule_keys = set(data.keys())

            time_elements = ('seconds', 'minutes', 'hours', 'days')
            scheduling_elements = ('when', 'cron', 'once')

            invalid_sched_combos = [
                set(i) for i in itertools.combinations(scheduling_elements, 2)
            ]

            if any(i <= schedule_keys for i in invalid_sched_combos):
                log.error(
                    'Unable to use "%s" options together. Ignoring.',
                    '", "'.join(scheduling_elements)
                )
                continue

            invalid_time_combos = []
            for item in scheduling_elements:
                all_items = itertools.chain([item], time_elements)
                invalid_time_combos.append(
                    set(itertools.combinations(all_items, 2)))

            # NOTE(review): each entry here is a set of 2-tuples, compared
            # against a set of plain key strings -- the subset test below
            # looks like it can never match, so this check seems ineffective.
            if any(set(x) <= schedule_keys for x in invalid_time_combos):
                log.error(
                    'Unable to use "%s" with "%s" options. Ignoring',
                    '", "'.join(time_elements),
                    '", "'.join(scheduling_elements)
                )
                continue

            if 'run_explicit' in data:
                _handle_run_explicit(data, loop_interval)
                run = data['run']

            # Exactly one scheduling style is evaluated per job.
            if True in [True for item in time_elements if item in data]:
                _handle_time_elements(data)
            elif 'once' in data:
                _handle_once(data, loop_interval)
            elif 'when' in data:
                _handle_when(data, loop_interval)
            elif 'cron' in data:
                _handle_cron(data, loop_interval)
            else:
                continue

            # Something told us to continue, so we continue
            if '_continue' in data and data['_continue']:
                continue

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

            # If there is no job specific splay available,
            # grab the global which defaults to None.
            if 'splay' not in data:
                data['splay'] = self.splay

            if 'splay' in data and data['splay']:
                # Got "splay" configured, make decision to run a job based on that
                if not data['_splay']:
                    # Try to add "splay" time only if next job fire time is
                    # still in the future. We should trigger job run
                    # immediately otherwise.
                    splay = _splay(data['splay'])
                    if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                        log.debug('schedule.handle_func: Adding splay of '
                                  '%s seconds to next run.', splay)
                        data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                        if 'when' in data:
                            data['_run'] = True
                    else:
                        run = True

                if data['_splay']:
                    # The "splay" configuration has been already processed, just use it
                    seconds = (data['_splay'] - now).total_seconds()
                    if 'when' in data:
                        data['_next_fire_time'] = data['_splay']

            if '_seconds' in data:
                if seconds <= 0:
                    run = True

            elif 'when' in data and data['_run']:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    data['_run'] = False
                    run = True

            elif 'cron' in data:
                # Reset next scheduled time because it is in the past now,
                # and we should trigger the job run, then wait for the next one.
                if seconds <= 0:
                    data['_next_fire_time'] = None
                    run = True

            elif 'once' in data:
                if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                    run = True

            elif seconds == 0:
                run = True

            if '_run_on_start' in data and data['_run_on_start']:
                run = True
                data['_run_on_start'] = False
            elif run:
                if 'range' in data:
                    _handle_range(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                # If there is no job specific skip_during_range available,
                # grab the global which defaults to None.
                if 'skip_during_range' not in data and self.skip_during_range:
                    data['skip_during_range'] = self.skip_during_range

                if 'skip_during_range' in data and data['skip_during_range']:
                    _handle_skip_during_range(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'skip_explicit' in data:
                    _handle_skip_explicit(data, loop_interval)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                    # Override the functiton if passed back
                    if 'func' in data:
                        func = data['func']

                if 'until' in data:
                    _handle_until(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

                if 'after' in data:
                    _handle_after(data)

                    # An error occurred so we bail out
                    if '_error' in data and data['_error']:
                        continue

                    run = data['run']

            # If args is a list and less than the number of functions
            # run is set to False.
            if 'args' in data and isinstance(data['args'], list):
                if len(data['args']) < len(func):
                    data['_error'] = ('Number of arguments is less than '
                                      'the number of functions. Ignoring job.')
                    log.error(data['_error'])
                    run = False

            # If the job item has continue, then we set run to False
            # so the job does not run but we still get the important
            # information calculated, eg. _next_fire_time
            if '_continue' in data and data['_continue']:
                run = False

            # If there is no job specific enabled available,
            # grab the global which defaults to True.
            if 'enabled' not in data:
                data['enabled'] = self.enabled

            # If globally disabled, disable the job
            if not self.enabled:
                data['enabled'] = self.enabled
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            # Job is disabled, set run to False
            if 'enabled' in data and not data['enabled']:
                data['_enabled'] = False
                data['_skip_reason'] = 'disabled'
                data['_skipped_time'] = now
                data['_skipped'] = True
                run = False

            miss_msg = ''
            if seconds < 0:
                miss_msg = ' (runtime missed ' \
                           'by {0} seconds)'.format(abs(seconds))

            try:
                if run:
                    # Job is disabled, continue
                    if 'enabled' in data and not data['enabled']:
                        log.debug('Job: %s is disabled', job_name)
                        data['_skip_reason'] = 'disabled'
                        data['_skipped_time'] = now
                        data['_skipped'] = True
                        continue

                    if 'jid_include' not in data or data['jid_include']:
                        data['jid_include'] = True
                        log.debug('schedule: Job %s was scheduled with jid_include, '
                                  'adding to cache (jid_include defaults to True)',
                                  job_name)
                        if 'maxrunning' in data:
                            log.debug('schedule: Job %s was scheduled with a max '
                                      'number of %s', job_name, data['maxrunning'])
                        else:
                            log.info('schedule: maxrunning parameter was not specified for '
                                     'job %s, defaulting to 1.', job_name)
                            data['maxrunning'] = 1

                    if not self.standalone:
                        data['run'] = run
                        data = self._check_max_running(func,
                                                       data,
                                                       self.opts,
                                                       now)
                        run = data['run']

                    # Check run again, just in case _check_max_running
                    # set run to False
                    if run:
                        log.info('Running scheduled job: %s%s', job_name, miss_msg)
                        self._run_job(func, data)
            finally:
                # Only set _last_run if the job ran
                if run:
                    data['_last_run'] = now
                # Splay is recomputed on the next pass.
                data['_splay'] = None

            if '_seconds' in data:
                if self.standalone:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif '_skipped' in data and data['_skipped']:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
                elif run:
                    data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Execute one schedule item, honoring ``dry_run`` and the
    ``run_schedule_jobs_in_background`` / ``multiprocessing`` opts.

    func -- list of function names to run (one entry per function)
    data -- the schedule item's data dict
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        # ``func`` may hold several functions; when ``args`` is also a
        # list, the i-th args entry is paired with the i-th function.
        for i, _func in enumerate(func):
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                # NOTE(review): join() makes the spawned job synchronous
                # here -- confirm this matches intended scheduling behavior.
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.skip_job
|
python
|
def skip_job(self, name, data):
'''
Skip a job at a specific time in the scheduler.
Ignores jobs from pillar
'''
time = data['time']
time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
# ensure job exists, then disable it
if name in self.opts['schedule']:
if 'skip_explicit' not in self.opts['schedule'][name]:
self.opts['schedule'][name]['skip_explicit'] = []
self.opts['schedule'][name]['skip_explicit'].append({'time': time,
'time_fmt': time_fmt})
elif name in self._get_schedule(include_opts=False):
log.warning("Cannot modify job %s, it's in the pillar!", name)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True,
'schedule': self._get_schedule()},
tag='/salt/minion/minion_schedule_skip_job_complete')
|
Skip a job at a specific time in the scheduler.
Ignores jobs from pillar
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L562-L584
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.

    Pass ``new_instance=True`` to get a fresh, non-singleton instance
    that does not replace the class-level singleton.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # Real initialization lives in __singleton_init__ because
        # __init__ runs on every Schedule(...) call and must stay empty.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Throwaway instance: do not touch the singleton reference.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
returners=None,
intervals=None,
cleanup=None,
proxy=None,
standalone=False,
new_instance=False,
utils=None):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    Real initializer for the singleton (``__init__`` stays empty).

    opts       -- minion/master opts dict
    functions  -- loaded execution (or runner) functions
    returners  -- returner functions; dict-like, or a loader object
    intervals  -- pre-seeded interval bookkeeping dict (optional)
    cleanup    -- iterable of job-name prefixes to delete on startup
    proxy      -- proxy-minion object, when running under a proxy
    standalone -- True skips returner wiring and proc-dir cleanup
    utils      -- loaded utility modules (lazily loaded when omitted)
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            # A loader object was passed instead of a mapping
            self.returners = returners.loader.gen_functions()
    # Minion timezone offset, '0000' when the timezone module is absent
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit: when at least ``maxrunning``
    live instances of this schedule item are already running, mark the
    job as skipped (``data['run'] = False``) and record the reason.

    Returns the (possibly modified) ``data`` dict.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only still-alive processes for this same item
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
    as YAML. Pillar jobs and private ('_'-prefixed) bookkeeping keys are
    never written to disk. I/O errors are logged, not raised.
    '''
    # Resolve the config dir: explicit opt, conf_file's dir, or default
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    # include_pillar=False: pillar jobs are not ours to persist;
    # remove_hidden=True: drop runtime bookkeeping keys
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove job ``name`` from the scheduler and fire a completion event.
    Pillar-defined jobs are warned about, never removed.
    '''
    opts_schedule = self.opts['schedule']
    if name in opts_schedule:
        del opts_schedule[name]
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # Drop any cached interval bookkeeping for the job
    self.intervals.pop(name, None)

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every scheduled job whose name starts with ``name``.
    Matching pillar jobs are warned about, never removed.
    '''
    for job in [j for j in self.opts['schedule'] if j.startswith(name)]:
        del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    for job in [j for j in self.intervals if j.startswith(name)]:
        del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Add a new job to the scheduler. The format is the same as required
    in the configuration file: ``data`` must be a dict with exactly one
    job entry. Jobs default to enabled unless explicitly disabled.
    Jobs that already exist in the pillar are warned about, not updated.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')

    # if enabled is not included in the job, assume job is enabled.
    for job in data:
        data[job].setdefault('enabled', True)

    new_job = next(six.iterkeys(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    else:
        if new_job in self.opts['schedule']:
            log.info('Updating job settings for scheduled job: %s', new_job)
        else:
            log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark job ``name`` as enabled and fire a completion event.
    Pillar-defined jobs cannot be modified.
    '''
    if name in self.opts['schedule']:
        # ensure job exists, then enable it
        self.opts['schedule'][name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark job ``name`` as disabled and fire a completion event.
    Pillar-defined jobs cannot be modified.
    '''
    if name in self.opts['schedule']:
        # ensure job exists, then disable it
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the definition of job ``name`` with ``schedule``.
    Jobs that live only in the pillar are warned about and left alone.
    '''
    if name in self.opts['schedule']:
        # Drop the old definition (fires the delete event) before re-adding
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now
    '''
    data = self._get_schedule().get(name, {})

    # The job's function may be configured under any of three keys.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            # Only logged here; actual execution still proceeds and
            # handle_func raises for missing functions.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            # NOTE(review): _run_job iterates its first argument as a
            # list of functions, but receives a single name here --
            # confirm against _run_job's enumerate(func) loop.
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler as a whole on and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = True

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler as a whole off and broadcast the updated schedule.
    '''
    self.opts['schedule']['enabled'] = False

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
'''
Reload the schedule from saved schedule file.
'''
# Remove all jobs from self.intervals
self.intervals = {}
if 'schedule' in schedule:
schedule = schedule['schedule']
self.opts.setdefault('schedule', {}).update(schedule)
def list(self, where):
    '''
    Fire an event carrying the current schedule items.

    where -- 'pillar' for pillar-only, 'opts' for opts-only,
             anything else for the merged schedule
    '''
    # (name shadows the builtin, but it is part of the public interface)
    if where == 'pillar':
        schedule = self._get_schedule(include_opts=False)
    elif where == 'opts':
        schedule = self._get_schedule(include_pillar=False)
    else:
        schedule = self._get_schedule()

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk, then fire a confirmation event.
    '''
    self.persist()

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone a job in the scheduler.
    Ignores jobs from pillar

    data must carry 'time' (the fire time to skip) and 'new_time' (the
    replacement fire time); 'time_fmt' optionally overrides the parse
    format for both.
    '''
    time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    # ensure job exists, then disable it
    if name in self.opts['schedule']:
        # Skip the original fire time...
        if 'skip_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['skip_explicit'] = []
        self.opts['schedule'][name]['skip_explicit'].append({'time': time,
                                                            'time_fmt': time_fmt})

        # ...and explicitly run at the new time instead.
        if 'run_explicit' not in self.opts['schedule'][name]:
            self.opts['schedule'][name]['run_explicit'] = []
        self.opts['schedule'][name]['run_explicit'].append({'time': new_time,
                                                           'time_fmt': time_fmt})

    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Fire an event containing the next fire time of job ``name``
    formatted with ``fmt``, or None when none is scheduled.
    '''
    _next_fire_time = None
    schedule = self._get_schedule()
    if schedule:
        _next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
        if _next_fire_time:
            _next_fire_time = _next_fire_time.strftime(fmt)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the schedule data for job ``name`` (empty dict if unknown).
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute this method in a multiprocess or thread

    multiprocessing_enabled -- True when running in a spawned process
                               (the process exits via sys.exit at the end)
    func -- name of the function to execute
    data -- the schedule item's data dict
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This also needed for ZeroMQ transport to reset all functions
        # context data that could keep paretns connections. ZeroMQ will
        # hang on polling parents connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)

    # Skeleton of the return payload sent to returners / the master
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Don't *BEFORE* to go into try to don't let it triple execute the finally section.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Splt to funcs, remove nested try-except-finally sections.
    try:
        # Respect minion blackout: only saltutil.refresh_pillar and
        # whitelisted functions may run while blacked out.
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # NOTE(review): 'is not' compares identity against a str
                # literal (SyntaxWarning on Python 3.8+); '!=' is the
                # intended comparison here.
                if key is not 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)

            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        # The actual job execution
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                # NOTE(review): nesting reconstructed from a flattened
                # source -- this finally is taken to pair with the
                # unlink try above; confirm against upstream salt.
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
    '''
    Compute the next fire time for a job scheduled with a ``cron``
    expression.

    Closure over ``now``, ``self`` and ``log`` from the enclosing eval
    loop.  Mutates ``data`` in place: sets ``_next_fire_time`` and
    ``_next_scheduled_fire_time`` on success, or ``_error`` (and logs)
    when croniter is missing or the cron string is invalid.

    NOTE(review): the ``loop_interval`` parameter is unused here; it is
    kept only for signature parity with the other ``_handle_*`` helpers.
    '''
    if not _CRON_SUPPORTED:
        data['_error'] = ('Missing python-croniter. '
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return

    if data['_next_fire_time'] is None:
        # Get next time frame for a "cron" job if it has been never
        # executed before or already executed in the past.
        try:
            data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
        except (ValueError, KeyError):
            data['_error'] = ('Invalid cron string. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

    # If next job run is scheduled more than 1 minute ahead and
    # configured loop interval is longer than that, we should
    # shorten it to get our job executed closer to the beginning
    # of desired time.
    # NOTE(review): ``now - data['_next_fire_time']`` is negative for a
    # future fire time, so this condition only triggers for fire times
    # at least 60s in the past — looks inverted vs. the comment; confirm.
    interval = (now - data['_next_fire_time']).total_seconds()
    if interval >= 60 and interval < self.loop_interval:
        self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
    '''
    Evaluate the ``run_explicit`` option: run the job only at the
    explicitly listed times.

    Closure over ``now`` from the enclosing eval loop.  Each list entry
    is either a ``datetime.datetime`` or a dict with ``time`` and
    ``time_fmt`` keys parsed via ``strptime``.  Sets ``data['run']``
    (and ``data['_next_fire_time']`` when a run is due) in place.
    '''
    _run_explicit = []
    for _run_time in data['run_explicit']:
        if isinstance(_run_time, datetime.datetime):
            _run_explicit.append(_run_time)
        else:
            _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                            _run_time['time_fmt']))

    data['run'] = False

    # Copy the list so we can loop through it while removing entries.
    # The ``len(...) > 1`` guard keeps the final stale entry around,
    # presumably so a past trigger stays detectable — confirm.
    for i in copy.deepcopy(_run_explicit):
        if len(_run_explicit) > 1:
            if i < now - loop_interval:
                _run_explicit.remove(i)

    if _run_explicit:
        # Fire only while "now" is inside the first entry's loop window.
        if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
            data['run'] = True
            data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
    '''
    Evaluate the ``skip_explicit`` option: skip the job at the listed
    times, or divert it to the schedule-level skip_function if one is
    configured.

    Closure over ``now`` and ``self.skip_function`` from the enclosing
    eval loop.  Entries are ``datetime.datetime`` objects or dicts with
    ``time``/``time_fmt`` keys.  Sets ``data['run']`` and the ``_skip*``
    bookkeeping keys in place.
    '''
    data['run'] = False
    _skip_explicit = []
    for _skip_time in data['skip_explicit']:
        if isinstance(_skip_time, datetime.datetime):
            _skip_explicit.append(_skip_time)
        else:
            _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                             _skip_time['time_fmt']))

    # Copy the list so we can loop through it while pruning entries
    # that are already further in the past than one loop interval.
    for i in copy.deepcopy(_skip_explicit):
        if i < now - loop_interval:
            _skip_explicit.remove(i)

    if _skip_explicit:
        if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
            if self.skip_function:
                # A skip_function is configured: run it instead of
                # the job's own function.
                data['run'] = True
                data['func'] = self.skip_function
            else:
                data['_skip_reason'] = 'skip_explicit'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
        else:
            data['run'] = True
def _handle_skip_during_range(data, loop_interval):
    '''
    Evaluate the ``skip_during_range`` option: suppress any run whose
    time falls between the range's start and end (or divert it to the
    schedule-level skip_function), optionally queueing an immediate run
    right after the range via ``run_after_skip_range``.

    Closure over ``now`` and ``self.skip_function`` from the enclosing
    eval loop.  Requires python-dateutil to parse string dates.
    Mutates ``data`` in place (``run``, ``func``, ``_skip*``,
    ``run_explicit``, ``_error``).
    '''
    if not _RANGE_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return

    if not isinstance(data['skip_during_range'], dict):
        data['_error'] = ('schedule.handle_func: Invalid, range '
                          'must be specified as a dictionary. '
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return

    start = data['skip_during_range']['start']
    end = data['skip_during_range']['end']
    # Coerce string boundaries to datetimes; bail out with _error on
    # anything dateutil cannot parse.
    if not isinstance(start, datetime.datetime):
        try:
            start = dateutil_parser.parse(start)
        except ValueError:
            data['_error'] = ('Invalid date string for start in '
                              'skip_during_range. Ignoring '
                              'job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
    if not isinstance(end, datetime.datetime):
        try:
            end = dateutil_parser.parse(end)
        except ValueError:
            data['_error'] = ('Invalid date string for end in '
                              'skip_during_range. Ignoring '
                              'job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

    # Check to see if we should run the job immediately
    # after the skip_during_range is over
    if 'run_after_skip_range' in data and \
            data['run_after_skip_range']:
        if 'run_explicit' not in data:
            data['run_explicit'] = []
        # Add a run_explicit for immediately after the
        # skip_during_range ends
        _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
        if _run_immediate not in data['run_explicit']:
            data['run_explicit'].append({'time': _run_immediate,
                                         'time_fmt': '%Y-%m-%dT%H:%M:%S'})

    if end > start:
        if start <= now <= end:
            if self.skip_function:
                data['run'] = True
                data['func'] = self.skip_function
            else:
                data['_skip_reason'] = 'in_skip_range'
                data['_skipped_time'] = now
                data['_skipped'] = True
                data['run'] = False
        else:
            data['run'] = True
    else:
        data['_error'] = ('schedule.handle_func: Invalid '
                          'range, end must be larger than '
                          'start. Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
def _handle_range(data):
    '''
    Evaluate the ``range`` option: only run while "now" lies between
    the range's start and end, or — with ``invert`` set — only while
    it lies outside that window.

    Closure over ``now`` and ``self.skip_function`` from the enclosing
    eval loop.  Requires python-dateutil to parse string dates.  Sets
    ``data['run']`` (and ``func``/``_skip*``/``_error``) in place.
    '''
    if not _RANGE_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}'.format(data['name']))
        log.error(data['_error'])
        return

    if not isinstance(data['range'], dict):
        data['_error'] = ('schedule.handle_func: Invalid, range '
                          'must be specified as a dictionary.'
                          'Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
        return

    start = data['range']['start']
    end = data['range']['end']
    # Coerce string boundaries to datetimes.
    if not isinstance(start, datetime.datetime):
        try:
            start = dateutil_parser.parse(start)
        except ValueError:
            data['_error'] = ('Invalid date string for start. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return
    if not isinstance(end, datetime.datetime):
        try:
            end = dateutil_parser.parse(end)
        except ValueError:
            data['_error'] = ('Invalid date string for end.'
                              ' Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

    if end > start:
        if 'invert' in data['range'] and data['range']['invert']:
            # Inverted range: run only OUTSIDE [start, end].
            if now <= start or now >= end:
                data['run'] = True
            else:
                data['_skip_reason'] = 'in_skip_range'
                data['run'] = False
        else:
            # Normal range: run only INSIDE [start, end], otherwise
            # either divert to the skip_function or record the skip.
            if start <= now <= end:
                data['run'] = True
            else:
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'not_in_range'
                    data['run'] = False
    else:
        data['_error'] = ('schedule.handle_func: Invalid '
                          'range, end must be larger '
                          'than start. Ignoring job {0}.'.format(data['name']))
        log.error(data['_error'])
def _handle_after(data):
    '''
    Evaluate the ``after`` option: only run the job once the given
    time has passed.

    Closure over ``now`` from the enclosing eval loop.  Requires
    python-dateutil to parse string dates.  Sets ``data['run']`` and,
    when skipping, the ``_skip*`` bookkeeping keys in place.
    '''
    if not _WHEN_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}'.format(data['name']))
        log.error(data['_error'])
        return

    after = data['after']
    if not isinstance(after, datetime.datetime):
        # NOTE(review): unlike the range handlers, parse errors here
        # are not caught — an unparseable string raises ValueError.
        after = dateutil_parser.parse(after)

    if after >= now:
        log.debug(
            'After time has not passed skipping job: %s.',
            data['name']
        )
        data['_skip_reason'] = 'after_not_passed'
        data['_skipped_time'] = now
        data['_skipped'] = True
        data['run'] = False
    else:
        data['run'] = True
def _handle_until(data):
    '''
    Evaluate the ``until`` option: stop running the job once the given
    time has passed.

    Closure over ``now`` from the enclosing eval loop.  Requires
    python-dateutil to parse string dates.  Sets ``data['run']`` and,
    when skipping, the ``_skip*`` bookkeeping keys in place.
    '''
    if not _WHEN_SUPPORTED:
        data['_error'] = ('Missing python-dateutil. '
                          'Ignoring job {0}'.format(data['name']))
        log.error(data['_error'])
        return

    until = data['until']
    if not isinstance(until, datetime.datetime):
        # NOTE(review): parse errors are not caught here — an
        # unparseable string raises ValueError.
        until = dateutil_parser.parse(until)

    if until <= now:
        log.debug(
            'Until time has passed skipping job: %s.',
            data['name']
        )
        data['_skip_reason'] = 'until_passed'
        data['_skipped_time'] = now
        data['_skipped'] = True
        data['run'] = False
    else:
        data['run'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Launch a due scheduled job via self.handle_func.

    Depending on configuration the job is run inline
    (``run_schedule_jobs_in_background: False``), in a new process
    (``multiprocessing: True``, the default) or in a thread.

    :param func: function name, or list of function names, to run.
    :param dict data: the job definition; ``data['args']`` as a list
        is distributed one element per function.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        for i, _func in enumerate(func):
            # Each function gets its own copy of the job data so the
            # per-function 'args' substitution cannot leak across runs.
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.get_next_fire_time
|
python
|
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
'''
Return the next fire time for the specified job
'''
schedule = self._get_schedule()
_next_fire_time = None
if schedule:
_next_fire_time = schedule.get(name, {}).get('_next_fire_time', None)
if _next_fire_time:
_next_fire_time = _next_fire_time.strftime(fmt)
# Fire the complete event back along with updated list of schedule
evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
evt.fire_event({'complete': True, 'next_fire_time': _next_fire_time},
tag='/salt/minion/minion_schedule_next_fire_time_complete')
|
Return the next fire time for the specified job
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L586-L601
|
[
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.

    The class behaves as a singleton: the first construction is cached
    on ``cls.instance`` and returned for every later call.  Passing
    ``new_instance=True`` builds (and returns) a fresh instance without
    replacing the cached singleton.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # Real initialization happens here; __init__ stays empty so
        # re-running it on the cached singleton is harmless.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally empty: __init__ runs on EVERY construction, even
    # when __new__ returns the cached singleton, so all real setup
    # lives in __singleton_init__ (invoked once from __new__).
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initialization invoked by ``__new__`` (the real
    constructor for this singleton class).

    :param dict opts: minion/master configuration.
    :param functions: loaded execution modules (mapping-like).
    :param returners: loaded returner modules, or a loader to expand.
    :param dict intervals: prior per-job interval state to resume with.
    :param cleanup: iterable of job-name prefixes to delete at startup.
    :param proxy: proxy-minion context, if any.
    :param bool standalone: skip returner/offset/cleanup setup when True.
    :param utils: loaded utility modules; lazily built when omitted.
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Schedule-wide defaults; may be overridden by the schedule data
    # itself during eval.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either an already-expanded mapping of returners or a
        # loader object that can generate one.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments re-supplied to __new__ when the singleton is pickled
    # and reconstructed: (opts, functions, returners, intervals,
    # cleanup=None).
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts currently running jobs with the same schedule name and, if
    starting another would exceed ``data['maxrunning']``, marks the job
    skipped (``data['run'] = False`` plus ``_skip*`` bookkeeping).

    :param func: function name(s) of the job, used only for logging.
    :param dict data: job definition; mutated in place.
    :param dict opts: NOTE(review): unused — the instance's
        ``self.opts`` is consulted instead; confirm before removing.
    :param datetime.datetime now: timestamp recorded when skipping.
    :return: the (possibly modified) data dict.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Running-job discovery differs between master and minion.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Only count entries whose process is still alive.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf

    Only the opts-based schedule is written (pillar jobs are excluded)
    and private bookkeeping keys (``_*``) are stripped first.  Write
    failures are logged, not raised.
    '''
    # Resolve the config directory: explicit conf_dir, then the
    # directory of conf_file, then the system default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        # Best-effort: keep the scheduler running even if the state
        # file cannot be written.
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignore jobs from pillar

    Also drops the job's interval bookkeeping and fires a completion
    event (the event is fired whether or not the job existed).

    :param str name: job name to remove from the opts schedule.
    :param bool persist: write the updated schedule to disk.
    '''
    # ensure job exists, then delete it
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        # Pillar-defined jobs can only be changed via pillar.
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    if name in self.intervals:
        del self.intervals[name]

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignores jobs from pillar

    Removes every opts-schedule job whose name starts with ``name``,
    warns about matching pillar jobs (which cannot be removed here),
    clears matching interval bookkeeping and fires a completion event.

    :param str name: job-name prefix to match.
    :param bool persist: write the updated schedule to disk.
    '''
    # ensure job exists, then delete it
    # (list() snapshots the keys so we can delete while iterating)
    for job in list(self.opts['schedule'].keys()):
        if job.startswith(name):
            del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    for job in list(self.intervals.keys()):
        if job.startswith(name):
            del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Adds a new job to the scheduler. The format is the same as required in
    the configuration file. See the docs on how YAML is interpreted into
    python data-structures to make sure, you pass correct dictionaries.

    :param dict data: a single-entry dict mapping the job name to its
        definition; ``enabled`` defaults to True when omitted.
    :param bool persist: write the updated schedule to disk.
    :raises ValueError: if data is not a dict or holds more than one job.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if not len(data) == 1:
        raise ValueError('You can only schedule one new job at a time.')

    # if enabled is not included in the job,
    # assume job is enabled.
    for job in data:
        if 'enabled' not in data[job]:
            data[job]['enabled'] = True

    new_job = next(six.iterkeys(data))

    # Pillar-defined jobs shadow opts jobs and cannot be updated here.
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)

    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)

    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark an opts-defined job as enabled.

    Pillar-defined jobs cannot be modified here; a warning is logged
    instead.  A completion event carrying the merged schedule is always
    fired, and the schedule is persisted unless ``persist`` is False.
    '''
    opts_jobs = self.opts['schedule']
    if name in opts_jobs:
        opts_jobs[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Report completion (with the updated schedule) on the event bus.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark an opts-defined job as disabled.

    Pillar-defined jobs cannot be modified here; a warning is logged
    instead.  A completion event carrying the merged schedule is always
    fired, and the schedule is persisted unless ``persist`` is False.
    '''
    opts_jobs = self.opts['schedule']
    if name in opts_jobs:
        opts_jobs[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Report completion (with the updated schedule) on the event bus.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True,
                          'schedule': self._get_schedule()},
                         tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Modify a job in the scheduler. Ignores jobs from pillar

    :param str name: job name to replace.
    :param dict schedule: the new job definition.
    :param bool persist: write the updated schedule to disk.
    '''
    # ensure job exists, then replace it
    if name in self.opts['schedule']:
        # delete_job fires its own completion event and (when persist
        # is True) writes the intermediate state to disk before the
        # replacement below is stored.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now

    Looks the job up in the merged schedule, normalizes its function
    specification (``function``/``func``/``fun``, scalar or list) and
    dispatches it via ``_run_job`` unless ``data['run']`` is False.
    Unknown function names are logged but do not abort the run.
    '''
    data = self._get_schedule().get(name, {})

    # Accept any of the three accepted spellings of the function key.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    if not isinstance(func, list):
        func = [func]

    for _func in func:
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and announce the change.
    '''
    self.opts['schedule']['enabled'] = True

    # Publish the new state along with the merged schedule.
    payload = {'complete': True, 'schedule': self._get_schedule()}
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(payload,
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and announce the change.
    '''
    self.opts['schedule']['enabled'] = False

    # Publish the new state along with the merged schedule.
    payload = {'complete': True, 'schedule': self._get_schedule()}
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event(payload,
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge a saved schedule back into opts and reset interval state.

    schedule -- a job mapping, optionally nested under a 'schedule'
                key (the persisted on-disk layout).
    '''
    # Forget all cached interval bookkeeping; it is rebuilt on the
    # next evaluation pass.
    self.intervals = {}

    # Accept both the bare job mapping and the wrapped persisted form.
    jobs = schedule['schedule'] if 'schedule' in schedule else schedule
    self.opts.setdefault('schedule', {}).update(jobs)
def list(self, where):
    '''
    Publish the current schedule items on the event bus.

    where -- 'pillar' restricts the listing to pillar-defined jobs,
             'opts' to opts-defined jobs; any other value yields the
             merged schedule.
    '''
    selector = {'pillar': {'include_opts': False},
                'opts': {'include_pillar': False}}
    schedule = self._get_schedule(**selector.get(where, {}))

    # Fire the complete event back along with the list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Write the in-memory schedule out to disk, then fire a completion
    event so callers know the save finished.
    '''
    self.persist()

    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone one run of a scheduled job: skip it at its original time
    and run it once at a new time instead.  Jobs sourced from pillar
    cannot be changed.

    data must carry 'time' (the run to suppress), 'new_time' (the
    replacement run time) and optionally 'time_fmt' (strptime format,
    ISO-8601 by default).
    '''
    orig_time = data['time']
    new_time = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Suppress the originally scheduled run...
        job.setdefault('skip_explicit', []).append({'time': orig_time,
                                                    'time_fmt': time_fmt})
        # ...and queue a one-off run at the new time.
        job.setdefault('run_explicit', []).append({'time': new_time,
                                                   'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Suppress one scheduled run of a job at a specific time.  Jobs
    sourced from pillar cannot be changed.

    data must carry 'time' (the run to skip) and optionally 'time_fmt'
    (strptime format, ISO-8601 by default).
    '''
    skip_time = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append({'time': skip_time,
                                                    'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def job_status(self, name):
    '''
    Look up a single job in the merged (opts + pillar) schedule.

    Returns the job's definition dict, or an empty dict when no job
    with that name exists.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Run one scheduled function and handle all job bookkeeping: building
    the job return document, writing the proc file, dispatching
    returners, firing the return event and cleaning up afterwards.

    multiprocessing_enabled -- True when this call runs in a spawned
        subprocess; the process is terminated once the job completes
    func -- name of the function to execute (e.g. ``test.ping``)
    data -- the schedule item (may carry args, kwargs, returner(s),
        metadata, jid_include, return_job, ...)
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This is also needed for the ZeroMQ transport to reset all
        # function context data that could keep parent connections;
        # ZeroMQ will hang on polling parent connections from the
        # child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)

    # Skeleton of the job return document.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Daemonize (if configured) *before* entering the try block so
        # the finally section cannot execute multiple times.
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Split to funcs, remove nested try-except-finally sections.
    try:
        # Blackout mode: only saltutil.refresh_pillar plus an explicit
        # whitelist may run, whether blackout comes from pillar or grains.
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func supports **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # This function accepts **kwargs, pack in the publish data.
            # BUGFIX: was ``key is not 'kwargs'`` -- identity comparison
            # with a string literal only works by CPython interning
            # accident and raises SyntaxWarning on Python 3.8+.
            for key, val in six.iteritems(ret):
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)

            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []

            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                if not data_returner and not self.schedule_returner:
                    # No returners defined, so we're only sending back to the master
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            # NOTE(review): indentation reconstructed from a flattened
            # source -- confirm against upstream that this finally pairs
            # with the unlink try below.
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def eval(self, now=None):
    '''
    Evaluate and execute the schedule

    :param datetime now: Override current time with a datetime object instance
    '''
    log.trace('==== evaluating schedule now %s =====', now)

    # Normalize the configured loop interval to a timedelta so every
    # time comparison below is between like types.
    loop_interval = self.opts['loop_interval']
    if not isinstance(loop_interval, datetime.timedelta):
        loop_interval = datetime.timedelta(seconds=loop_interval)

    def _splay(splaytime):
        '''
        Calculate splaytime
        '''
        # splaytime may be an int (upper bound) or a {'start','end'} dict.
        splay_ = None
        if isinstance(splaytime, dict):
            if splaytime['end'] >= splaytime['start']:
                splay_ = random.randint(splaytime['start'],
                                        splaytime['end'])
            else:
                log.error('schedule.handle_func: Invalid Splay, '
                          'end must be larger than start. Ignoring splay.')
        else:
            splay_ = random.randint(1, splaytime)
        return splay_

    def _handle_time_elements(data):
        '''
        Handle schedule item with time elements
        seconds, minutes, hours, days
        '''
        # Compute the combined interval only once; it is cached on the
        # job as '_seconds'.
        if '_seconds' not in data:
            interval = int(data.get('seconds', 0))
            interval += int(data.get('minutes', 0)) * 60
            interval += int(data.get('hours', 0)) * 3600
            interval += int(data.get('days', 0)) * 86400

            data['_seconds'] = interval

            if not data['_next_fire_time']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

            # Shrink the scheduler's loop interval so short jobs are
            # not missed between evaluation passes.
            if interval < self.loop_interval:
                self.loop_interval = interval

        data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

    def _handle_once(data, loop_interval):
        '''
        Handle schedule item with once
        '''
        if data['_next_fire_time']:
            if data['_next_fire_time'] < now - loop_interval or \
                    data['_next_fire_time'] > now and \
                    not data['_splay']:
                data['_continue'] = True

        if not data['_next_fire_time'] and \
                not data['_splay']:
            once = data['once']
            if not isinstance(once, datetime.datetime):
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'],
                                                      once_fmt)
                except (TypeError, ValueError):
                    data['_error'] = ('Date string could not '
                                      'be parsed: {0}, {1}. '
                                      'Ignoring job {2}.'.format(
                                          data['once'],
                                          once_fmt,
                                          data['name']))
                    log.error(data['_error'])
                    return
            data['_next_fire_time'] = once
            data['_next_scheduled_fire_time'] = once
            # If _next_fire_time is less than now, continue
            if once < now - loop_interval:
                data['_continue'] = True

    def _handle_when(data, loop_interval):
        '''
        Handle schedule item with when
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['when'], list):
            _when_data = [data['when']]
        else:
            _when_data = data['when']

        # Resolve each entry: pillar 'whens' lookup, grains 'whens'
        # lookup, or a literal date/date-string.
        _when = []
        for i in _when_data:
            if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                    i in self.opts['pillar']['whens']):
                if not isinstance(self.opts['pillar']['whens'],
                                  dict):
                    data['_error'] = ('Pillar item "whens" '
                                      'must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['pillar']['whens'][i]
            elif ('whens' in self.opts['grains'] and
                  i in self.opts['grains']['whens']):
                if not isinstance(self.opts['grains']['whens'],
                                  dict):
                    data['_error'] = ('Grain "whens" must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['grains']['whens'][i]
            else:
                when_ = i

            if not isinstance(when_, datetime.datetime):
                try:
                    when_ = dateutil_parser.parse(when_)
                except ValueError:
                    data['_error'] = ('Invalid date string {0}. '
                                      'Ignoring job {1}.'.format(i, data['name']))
                    log.error(data['_error'])
                    return

            _when.append(when_)

        if data['_splay']:
            _when.append(data['_splay'])

        # Sort the list of "whens" from earlier to later schedules
        _when.sort()

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_when):
            if len(_when) > 1:
                if i < now - loop_interval:
                    # Remove all missed schedules except the latest one.
                    # We need it to detect if it was triggered previously.
                    _when.remove(i)

        if _when:
            # Grab the first element, which is the next run time or
            # last scheduled time in the past.
            when = _when[0]

            if when < now - loop_interval and \
                    not data.get('_run', False) and \
                    not data.get('run', False) and \
                    not data['_splay']:
                data['_next_fire_time'] = None
                data['_continue'] = True
                return

            if '_run' not in data:
                # Prevent run of jobs from the past
                data['_run'] = bool(when >= now - loop_interval)

            if not data['_next_fire_time']:
                data['_next_fire_time'] = when
                data['_next_scheduled_fire_time'] = when

            # 'run' here is eval()'s per-job loop variable (closure).
            if data['_next_fire_time'] < when and \
                    not run and \
                    not data['_run']:
                data['_next_fire_time'] = when
                data['_run'] = True

        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True

    def _handle_cron(data, loop_interval):
        '''
        Handle schedule item with cron
        '''
        if not _CRON_SUPPORTED:
            data['_error'] = ('Missing python-croniter. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if data['_next_fire_time'] is None:
            # Get next time frame for a "cron" job if it has been never
            # executed before or already executed in the past.
            try:
                data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            except (ValueError, KeyError):
                data['_error'] = ('Invalid cron string. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            # If next job run is scheduled more than 1 minute ahead and
            # configured loop interval is longer than that, we should
            # shorten it to get our job executed closer to the beginning
            # of desired time.
            # NOTE(review): (now - _next_fire_time) is negative for a
            # future fire time, so 'interval >= 60' looks like it can
            # never hold here -- confirm intent against upstream.
            interval = (now - data['_next_fire_time']).total_seconds()
            if interval >= 60 and interval < self.loop_interval:
                self.loop_interval = interval

    def _handle_run_explicit(data, loop_interval):
        '''
        Handle schedule item with run_explicit
        '''
        # Normalize entries to datetimes (entries may be datetimes or
        # {'time', 'time_fmt'} dicts).
        _run_explicit = []
        for _run_time in data['run_explicit']:
            if isinstance(_run_time, datetime.datetime):
                _run_explicit.append(_run_time)
            else:
                _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                _run_time['time_fmt']))

        data['run'] = False

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_run_explicit):
            if len(_run_explicit) > 1:
                if i < now - loop_interval:
                    _run_explicit.remove(i)

        if _run_explicit:
            if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                data['run'] = True
                data['_next_fire_time'] = _run_explicit[0]

    def _handle_skip_explicit(data, loop_interval):
        '''
        Handle schedule item with skip_explicit
        '''
        data['run'] = False

        # Normalize entries to datetimes, dropping ones already passed.
        _skip_explicit = []
        for _skip_time in data['skip_explicit']:
            if isinstance(_skip_time, datetime.datetime):
                _skip_explicit.append(_skip_time)
            else:
                _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                 _skip_time['time_fmt']))

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_skip_explicit):
            if i < now - loop_interval:
                _skip_explicit.remove(i)

        if _skip_explicit:
            if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                # Inside a skip window: either run the configured
                # skip_function instead, or record why we skipped.
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'skip_explicit'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True

    def _handle_skip_during_range(data, loop_interval):
        '''
        Handle schedule item with skip_during_range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['skip_during_range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['skip_during_range']['start']
        end = data['skip_during_range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        # Check to see if we should run the job immediately
        # after the skip_during_range is over
        if 'run_after_skip_range' in data and \
                data['run_after_skip_range']:
            if 'run_explicit' not in data:
                data['run_explicit'] = []
            # Add a run_explicit for immediately after the
            # skip_during_range ends
            _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
            if _run_immediate not in data['run_explicit']:
                data['run_explicit'].append({'time': _run_immediate,
                                             'time_fmt': '%Y-%m-%dT%H:%M:%S'})

        if end > start:
            if start <= now <= end:
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger than '
                              'start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_range(data):
        '''
        Handle schedule item with range
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary.'
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['range']['start']
        end = data['range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end.'
                                  ' Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if end > start:
            # 'invert' flips the range: run only OUTSIDE [start, end].
            if 'invert' in data['range'] and data['range']['invert']:
                if now <= start or now >= end:
                    data['run'] = True
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['run'] = False
            else:
                if start <= now <= end:
                    data['run'] = True
                else:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'not_in_range'
                        data['run'] = False
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger '
                              'than start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_after(data):
        '''
        Handle schedule item with after
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        after = data['after']
        if not isinstance(after, datetime.datetime):
            after = dateutil_parser.parse(after)

        if after >= now:
            log.debug(
                'After time has not passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'after_not_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _handle_until(data):
        '''
        Handle schedule item with until
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        until = data['until']
        if not isinstance(until, datetime.datetime):
            until = dateutil_parser.parse(until)

        if until <= now:
            log.debug(
                'Until time has passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'until_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _chop_ms(dt):
        '''
        Remove the microseconds from a datetime object
        '''
        return dt - datetime.timedelta(microseconds=dt.microsecond)

    schedule = self._get_schedule()
    if not isinstance(schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    # Pull global settings out of the schedule before iterating jobs.
    if 'skip_function' in schedule:
        self.skip_function = schedule['skip_function']
    if 'skip_during_range' in schedule:
        self.skip_during_range = schedule['skip_during_range']
    if 'enabled' in schedule:
        self.enabled = schedule['enabled']
    if 'splay' in schedule:
        self.splay = schedule['splay']

    _hidden = ['enabled',
               'skip_function',
               'skip_during_range',
               'splay']
    for job, data in six.iteritems(schedule):

        # Skip anything that is a global setting
        if job in _hidden:
            continue

        # Clear these out between runs
        for item in ['_continue',
                     '_error',
                     '_enabled',
                     '_skipped',
                     '_skip_reason',
                     '_skipped_time']:
            if item in data:
                del data[item]

        run = False

        if 'name' in data:
            job_name = data['name']
        else:
            job_name = data['name'] = job

        # NOTE(review): this dict guard runs AFTER data has already been
        # indexed above -- a non-dict value would blow up before reaching
        # it; confirm ordering against upstream.
        if not isinstance(data, dict):
            log.error(
                'Scheduled job "%s" should have a dict value, not %s',
                job_name, type(data)
            )
            continue

        # Accept any of the three spellings for the function key.
        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]

        for _func in func:
            if _func not in self.functions:
                log.info(
                    'Invalid function: %s in scheduled job %s.',
                    _func, job_name
                )

        if '_next_fire_time' not in data:
            data['_next_fire_time'] = None

        if '_splay' not in data:
            data['_splay'] = None

        if 'run_on_start' in data and \
                data['run_on_start'] and \
                '_run_on_start' not in data:
            data['_run_on_start'] = True

        if not now:
            now = datetime.datetime.now()

        # Used for quick lookups when detecting invalid option
        # combinations.
        schedule_keys = set(data.keys())

        time_elements = ('seconds', 'minutes', 'hours', 'days')
        scheduling_elements = ('when', 'cron', 'once')

        invalid_sched_combos = [
            set(i) for i in itertools.combinations(scheduling_elements, 2)
        ]

        if any(i <= schedule_keys for i in invalid_sched_combos):
            log.error(
                'Unable to use "%s" options together. Ignoring.',
                '", "'.join(scheduling_elements)
            )
            continue

        invalid_time_combos = []
        for item in scheduling_elements:
            all_items = itertools.chain([item], time_elements)
            invalid_time_combos.append(
                set(itertools.combinations(all_items, 2)))

        # NOTE(review): each element here is a set of 2-tuples being
        # subset-compared against a set of key strings -- confirm this
        # detects the intended combinations.
        if any(set(x) <= schedule_keys for x in invalid_time_combos):
            log.error(
                'Unable to use "%s" with "%s" options. Ignoring',
                '", "'.join(time_elements),
                '", "'.join(scheduling_elements)
            )
            continue

        if 'run_explicit' in data:
            _handle_run_explicit(data, loop_interval)
            run = data['run']

        # Exactly one scheduling style is evaluated per job.
        if True in [True for item in time_elements if item in data]:
            _handle_time_elements(data)
        elif 'once' in data:
            _handle_once(data, loop_interval)
        elif 'when' in data:
            _handle_when(data, loop_interval)
        elif 'cron' in data:
            _handle_cron(data, loop_interval)
        else:
            continue

        # Something told us to continue, so we continue
        if '_continue' in data and data['_continue']:
            continue

        # An error occurred so we bail out
        if '_error' in data and data['_error']:
            continue

        seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

        # If there is no job specific splay available,
        # grab the global which defaults to None.
        if 'splay' not in data:
            data['splay'] = self.splay

        if 'splay' in data and data['splay']:
            # Got "splay" configured, make decision to run a job based on that
            if not data['_splay']:
                # Try to add "splay" time only if next job fire time is
                # still in the future. We should trigger job run
                # immediately otherwise.
                splay = _splay(data['splay'])
                if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                    log.debug('schedule.handle_func: Adding splay of '
                              '%s seconds to next run.', splay)
                    data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                    if 'when' in data:
                        data['_run'] = True
                else:
                    run = True

            if data['_splay']:
                # The "splay" configuration has been already processed, just use it
                seconds = (data['_splay'] - now).total_seconds()
                if 'when' in data:
                    data['_next_fire_time'] = data['_splay']

        # Decide whether this evaluation pass triggers the job.
        if '_seconds' in data:
            if seconds <= 0:
                run = True
        elif 'when' in data and data['_run']:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                data['_run'] = False
                run = True
        elif 'cron' in data:
            # Reset next scheduled time because it is in the past now,
            # and we should trigger the job run, then wait for the next one.
            if seconds <= 0:
                data['_next_fire_time'] = None
                run = True
        elif 'once' in data:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                run = True
        elif seconds == 0:
            run = True

        if '_run_on_start' in data and data['_run_on_start']:
            run = True
            data['_run_on_start'] = False
        elif run:
            # The job would fire -- apply the restricting options, each
            # of which may veto the run or swap in the skip_function.
            if 'range' in data:
                _handle_range(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

            # If there is no job specific skip_during_range available,
            # grab the global which defaults to None.
            if 'skip_during_range' not in data and self.skip_during_range:
                data['skip_during_range'] = self.skip_during_range

            if 'skip_during_range' in data and data['skip_during_range']:
                _handle_skip_during_range(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

            if 'skip_explicit' in data:
                _handle_skip_explicit(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

            if 'until' in data:
                _handle_until(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

            if 'after' in data:
                _handle_after(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

        # If args is a list and less than the number of functions
        # run is set to False.
        if 'args' in data and isinstance(data['args'], list):
            if len(data['args']) < len(func):
                data['_error'] = ('Number of arguments is less than '
                                  'the number of functions. Ignoring job.')
                log.error(data['_error'])
                run = False

        # If the job item has continue, then we set run to False
        # so the job does not run but we still get the important
        # information calculated, eg. _next_fire_time
        if '_continue' in data and data['_continue']:
            run = False

        # If there is no job specific enabled available,
        # grab the global which defaults to True.
        if 'enabled' not in data:
            data['enabled'] = self.enabled

        # If globally disabled, disable the job
        if not self.enabled:
            data['enabled'] = self.enabled
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        # Job is disabled, set run to False
        if 'enabled' in data and not data['enabled']:
            data['_enabled'] = False
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        miss_msg = ''
        if seconds < 0:
            miss_msg = ' (runtime missed ' \
                       'by {0} seconds)'.format(abs(seconds))

        try:
            if run:
                # Job is disabled, continue
                if 'enabled' in data and not data['enabled']:
                    log.debug('Job: %s is disabled', job_name)
                    data['_skip_reason'] = 'disabled'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    continue

                if 'jid_include' not in data or data['jid_include']:
                    data['jid_include'] = True
                    log.debug('schedule: Job %s was scheduled with jid_include, '
                              'adding to cache (jid_include defaults to True)',
                              job_name)
                    if 'maxrunning' in data:
                        log.debug('schedule: Job %s was scheduled with a max '
                                  'number of %s', job_name, data['maxrunning'])
                    else:
                        log.info('schedule: maxrunning parameter was not specified for '
                                 'job %s, defaulting to 1.', job_name)
                        data['maxrunning'] = 1

                if not self.standalone:
                    data['run'] = run
                    data = self._check_max_running(func,
                                                   data,
                                                   self.opts,
                                                   now)
                    run = data['run']

                # Check run again, just in case _check_max_running
                # set run to False
                if run:
                    log.info('Running scheduled job: %s%s', job_name, miss_msg)
                    self._run_job(func, data)
        finally:
            # Only set _last_run if the job ran
            if run:
                data['_last_run'] = now
                # NOTE(review): _splay reset reconstructed as conditional
                # on the job having run (a stored splay persists between
                # passes, per the "already processed" branch above) --
                # confirm against upstream.
                data['_splay'] = None

        # Advance the next fire time for interval-style jobs.
        if '_seconds' in data:
            if self.standalone:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif '_skipped' in data and data['_skipped']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif run:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    # Launch one scheduled job: spawn one process (or thread) per target
    # function in ``func``, unless backgrounding is disabled or the job is
    # a dry run.
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        # Processes get signal handling reset for the child; threads
        # simply share the current handlers.
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        for i, _func in enumerate(func):
            # Each target function gets its own copy of the job data.
            _data = copy.deepcopy(data)
            # When args is a list, the i-th entry belongs to the i-th function.
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.handle_func
|
python
|
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute one scheduled job in this (possibly forked) process or thread.

    Builds the job's return payload, honors minion blackout, records the
    job in the proc dir, runs the function, hands the result to any
    configured returners, and fires a ``__schedule_return`` event so the
    job shows up in the job cache.  When ``multiprocessing_enabled`` is
    True the process daemonizes first and exits when the job finishes.

    :param bool multiprocessing_enabled: job runs in its own process.
    :param str func: dotted name of the function to execute.
    :param dict data: the schedule item definition.
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This is also needed for the ZeroMQ transport to reset all function
        # context data that could keep the parent's connections alive; ZeroMQ
        # will hang on polling parent connections from the child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    # Skeleton of the return payload filled in as the job progresses.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}

    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            # NOTE(review): '%H %m' gives hour then *month*; '%M' (minutes)
            # looks intended -- confirm against consumers before changing.
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')

    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))

    data_returner = data.get('returner', None)

    if not self.standalone:
        # Path of the proc-dir file that marks this job as running.
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )

    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()

    if multiprocessing_enabled:
        # Daemonize *before* entering the try block so the finally section
        # is not executed multiple times (once per fork).
        salt.utils.process.daemonize_if(self.opts)

    # TODO: Make it readable! Split into funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')

        ret['pid'] = os.getpid()

        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))

        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])

        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))

        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))

        # if the func supports **kwargs, let's pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data.
            # Fixed: compare with != instead of `is not` -- identity checks
            # against a str literal are unreliable (SyntaxWarning on Py3.8+).
            for key, val in six.iteritems(ret):
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)

        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')

            event = salt.utils.event.get_event(
                    self.opts['__role'],
                    self.opts['sock_dir'],
                    self.opts['transport'],
                    opts=self.opts,
                    listen=False)

            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )

            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)

            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []

            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value

        self.functions.pack['__context__']['retcode'] = 0

        ret['return'] = self.functions[func](*args, **kwargs)

        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']

            ret['success'] = True

            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value

                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception as exc:
                    log.exception('Unhandled exception firing __schedule_return event')

        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)

            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
|
Execute this method in a multiprocess or thread
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L611-L851
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False):\n '''\n Return an event object suitable for the named transport\n '''\n # TODO: AIO core is separate from transport\n if opts['transport'] in ('zeromq', 'tcp', 'detect'):\n return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop)\n",
"def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n",
"def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n",
"def gen_jid(opts=None):\n '''\n Generate a jid\n '''\n if opts is None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The `opts` argument was not passed into salt.utils.jid.gen_jid(). '\n 'This will be required starting in {version}.'\n )\n opts = {}\n global LAST_JID_DATETIME # pylint: disable=global-statement\n\n if opts.get('utc_jid', False):\n jid_dt = datetime.datetime.utcnow()\n else:\n jid_dt = datetime.datetime.now()\n if not opts.get('unique_jid', False):\n return '{0:%Y%m%d%H%M%S%f}'.format(jid_dt)\n if LAST_JID_DATETIME and LAST_JID_DATETIME >= jid_dt:\n jid_dt = LAST_JID_DATETIME + datetime.timedelta(microseconds=1)\n LAST_JID_DATETIME = jid_dt\n return '{0:%Y%m%d%H%M%S%f}_{1}'.format(jid_dt, os.getpid())\n",
"def get_proc_dir(cachedir, **kwargs):\n '''\n Given the cache directory, return the directory that process data is\n stored in, creating it if it doesn't exist.\n The following optional Keyword Arguments are handled:\n\n mode: which is anything os.makedir would accept as mode.\n\n uid: the uid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n uid. Must be int. Works only on unix/unix like systems.\n\n gid: the gid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n gid. Must be int. Works only on unix/unix like systems.\n '''\n fn_ = os.path.join(cachedir, 'proc')\n mode = kwargs.pop('mode', None)\n\n if mode is None:\n mode = {}\n else:\n mode = {'mode': mode}\n\n if not os.path.isdir(fn_):\n # proc_dir is not present, create it with mode settings\n os.makedirs(fn_, **mode)\n\n d_stat = os.stat(fn_)\n\n # if mode is not an empty dict then we have an explicit\n # dir mode. So lets check if mode needs to be changed.\n if mode:\n mode_part = S_IMODE(d_stat.st_mode)\n if mode_part != mode['mode']:\n os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])\n\n if hasattr(os, 'chown'):\n # only on unix/unix like systems\n uid = kwargs.pop('uid', -1)\n gid = kwargs.pop('gid', -1)\n\n # if uid and gid are both -1 then go ahead with\n # no changes at all\n if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \\\n [i for i in (uid, gid) if i != -1]:\n os.chown(fn_, uid, gid)\n\n return fn_\n",
"def setup_multiprocessing_logging(queue=None):\n '''\n This code should be called from within a running multiprocessing\n process instance.\n '''\n from salt.utils.platform import is_windows\n\n global __MP_LOGGING_CONFIGURED\n global __MP_LOGGING_QUEUE_HANDLER\n\n if __MP_IN_MAINPROCESS is True and not is_windows():\n # We're in the MainProcess, return! No multiprocessing logging setup shall happen\n # Windows is the exception where we want to set up multiprocessing\n # logging in the MainProcess.\n return\n\n try:\n logging._acquireLock() # pylint: disable=protected-access\n\n if __MP_LOGGING_CONFIGURED is True:\n return\n\n # Let's set it to true as fast as possible\n __MP_LOGGING_CONFIGURED = True\n\n if __MP_LOGGING_QUEUE_HANDLER is not None:\n return\n\n # The temp null and temp queue logging handlers will store messages.\n # Since noone will process them, memory usage will grow. If they\n # exist, remove them.\n __remove_null_logging_handler()\n __remove_queue_logging_handler()\n\n # Let's add a queue handler to the logging root handlers\n __MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())\n logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)\n # Set the logging root level to the lowest needed level to get all\n # desired messages.\n log_level = get_multiprocessing_logging_level()\n logging.root.setLevel(log_level)\n logging.getLogger(__name__).debug(\n 'Multiprocessing queue logging configured for the process running '\n 'under PID: %s at log level %s', os.getpid(), log_level\n )\n # The above logging call will create, in some situations, a futex wait\n # lock condition, probably due to the multiprocessing Queue's internal\n # lock and semaphore mechanisms.\n # A small sleep will allow us not to hit that futex wait lock condition.\n time.sleep(0.0001)\n finally:\n logging._releaseLock() # pylint: disable=protected-access\n",
"def daemonize_if(opts):\n '''\n Daemonize a module function process if multiprocessing is True and the\n process is not being called by salt-call\n '''\n if 'salt-call' in sys.argv[0]:\n return\n if not opts.get('multiprocessing', True):\n return\n if sys.platform.startswith('win'):\n return\n daemonize(False)\n"
] |
class Schedule(object):
    '''
    Create a Schedule object, pass in the opts and the functions dict to use
    '''
    # Singleton cache: __new__ stores the first instance here and re-uses it
    # unless a caller explicitly asks for a new one.
    instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule.

    The first construction initializes and caches a singleton; later calls
    return the cached object, unless ``new_instance`` is True, in which case
    a fresh, un-cached Schedule is created and returned.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        # NOTE(review): no WeakValueDictionary is visible here any more; the
        # comment above looks stale -- verify against history.
        instance = object.__new__(cls)
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Caller wants its own instance: return it without caching.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally a no-op: all real initialization happens exactly once in
    # __singleton_init__, which is driven from __new__.
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    One-time initialization of the (usually singleton) Schedule.

    :param dict opts: master/minion configuration.
    :param functions: loader of execution/runner functions.
    :param returners: returner loader or mapping (ignored when standalone).
    :param dict intervals: pre-seeded per-job interval bookkeeping.
    :param cleanup: iterable of job-name prefixes to delete on startup.
    :param proxy: proxy-minion object, if any.
    :param bool standalone: skip returner wiring and proc-dir cleanup.
    :param utils: utils loader; lazily built from opts when not given.
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Global skip/splay overrides start unset; scheduler starts enabled.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept either a ready mapping or a loader we can expand.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
    self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
    self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
    if cleanup:
        for prefix in cleanup:
            self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments fed back into __new__ when the instance is pickled/copied;
    # the cleanup argument is deliberately dropped (always None).
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
    '''
    Look up a scheduler option, merging config and pillar when possible.
    '''
    # Without the merge helper, fall back to the raw opts value.
    if 'config.merge' not in self.functions:
        return self.opts.get(opt, {})
    return self.functions['config.merge'](opt, {}, omit_master=True)
def _get_schedule(self,
                  include_opts=True,
                  include_pillar=True,
                  remove_hidden=False):
    '''
    Return the merged schedule data structure.

    :param bool include_opts: merge in jobs from opts['schedule'].
    :param bool include_pillar: merge in jobs from pillar['schedule'].
    :param bool remove_hidden: strip internal keys (names starting with an
        underscore) from each job before returning.
    :raises ValueError: if either schedule source is not a dict.
    '''
    schedule = {}
    if include_pillar:
        pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
        if not isinstance(pillar_schedule, dict):
            raise ValueError('Schedule must be of type dict.')
        schedule.update(pillar_schedule)
    if include_opts:
        # opts entries override pillar entries with the same job name.
        opts_schedule = self.opts.get('schedule', {})
        if not isinstance(opts_schedule, dict):
            raise ValueError('Schedule must be of type dict.')
        schedule.update(opts_schedule)
    if remove_hidden:
        # Iterate a deep copy so deleting keys does not disturb iteration.
        _schedule = copy.deepcopy(schedule)
        for job in _schedule:
            if isinstance(_schedule[job], dict):
                for item in _schedule[job]:
                    if item.startswith('_'):
                        del schedule[job][item]
    return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Enforce the job's ``maxrunning`` limit.

    Counts currently-running instances of this schedule item and, when the
    limit is reached, flips ``data['run']`` to False and records why.  The
    (possibly modified) ``data`` dict is returned.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True

    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only live processes for this exact schedule item.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
    '''
    # Derive the config dir from opts, falling back to the conf_file's
    # directory and finally the compiled-in default.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    # Only opts-defined jobs are written; hidden (underscore) keys and
    # pillar jobs are excluded from the on-disk copy.
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        # Best-effort: log the failure instead of killing the scheduler.
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Remove job *name* from the scheduler.  Jobs defined in pillar cannot
    be deleted and only produce a warning.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'].pop(name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Notify listeners that the delete finished, with the updated schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_delete_complete')

    # Drop any interval bookkeeping for the removed job.
    self.intervals.pop(name, None)

    if persist:
        self.persist()
def reset(self):
    '''
    Restore the scheduler to its default run-time state.
    '''
    # Clear the global skip/splay overrides and re-enable the scheduler.
    for attr in ('skip_function', 'skip_during_range', 'splay'):
        setattr(self, attr, None)
    self.enabled = True
    # Drop every configured job.
    self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every job whose name starts with *name*.  Matching jobs defined
    in pillar cannot be deleted and only produce warnings.
    '''
    doomed = [job for job in self.opts['schedule'] if job.startswith(name)]
    for job in doomed:
        del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)

    # Notify listeners that the delete finished, with the updated schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_delete_complete')

    # Drop interval bookkeeping for all removed jobs.
    for job in [j for j in self.intervals if j.startswith(name)]:
        del self.intervals[job]

    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Add a new job to the scheduler.

    The format is the same as required in the configuration file: a dict
    mapping exactly one job name to its definition.  See the docs on how
    YAML is interpreted into python data-structures to make sure you pass
    correct dictionaries.  Jobs default to enabled when the definition
    does not say otherwise.

    :param dict data: one-entry mapping of job name -> job definition.
    :param bool persist: write the updated schedule to disk afterwards.
    :raises ValueError: if data is not a dict or holds more than one job.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    # Idiomatic comparison (was `not len(data) == 1`).
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')

    # if enabled is not included in the job, assume the job is enabled.
    for job in data:
        data[job].setdefault('enabled', True)

    new_job = next(six.iterkeys(data))

    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)

    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)

    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')

    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark job *name* as enabled.  Jobs defined in pillar cannot be modified
    and only produce a warning.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Broadcast completion together with the refreshed schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_job_complete')

    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark job *name* as disabled.  Jobs defined in pillar cannot be
    modified and only produce a warning.
    '''
    if name in self.opts['schedule']:
        self.opts['schedule'][name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Broadcast completion together with the refreshed schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_job_complete')

    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Replace the stored definition of job *name*.  Jobs defined in pillar
    cannot be modified and only produce a warning.
    '''
    in_opts = name in self.opts['schedule']
    if not in_opts and name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    if in_opts:
        # Remove the existing definition first (fires the delete event).
        self.delete_job(name, persist)

    self.opts['schedule'][name] = schedule

    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now
    '''
    data = self._get_schedule().get(name, {})

    # The target may be spelled 'function', 'func' or 'fun'.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    # Normalize to a list so multi-function jobs and single ones share a path.
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        # An unknown function is logged but does not stop the attempt.
        if _func not in self.functions:
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )

        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)

        # Grab run, assume True
        run = data.get('run', True)
        if run:
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Turn the scheduler on globally and announce it on the event bus.
    '''
    self.opts['schedule']['enabled'] = True

    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Turn the scheduler off globally and announce it on the event bus.
    '''
    self.opts['schedule']['enabled'] = False

    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Reload the schedule from a saved schedule structure.
    '''
    # Drop all interval bookkeeping; it will be rebuilt on evaluation.
    self.intervals = {}

    # Unwrap a top-level 'schedule' key when present.
    payload = schedule['schedule'] if 'schedule' in schedule else schedule
    self.opts.setdefault('schedule', {}).update(payload)
def list(self, where):
    '''
    Emit the current schedule items over the event bus.

    *where* selects the layer: 'pillar' for pillar-only, 'opts' for
    opts-only, anything else for the merged view.
    '''
    selectors = {
        'pillar': {'include_opts': False},
        'opts': {'include_pillar': False},
    }
    schedule = self._get_schedule(**selectors.get(where, {}))

    # Fire the complete event back along with the selected schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': schedule},
        tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule and announce completion on the event bus.
    '''
    self.persist()

    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True},
        tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone one run of job *name*: skip it at data['time'] and run it at
    data['new_time'] instead.  Jobs defined in pillar cannot be modified.
    '''
    when = data['time']
    replacement = data['new_time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        # Record both the skipped slot and the replacement run.
        job.setdefault('skip_explicit', []).append(
            {'time': when, 'time_fmt': time_fmt})
        job.setdefault('run_explicit', []).append(
            {'time': replacement, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Broadcast completion together with the refreshed schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Mark job *name* to be skipped at data['time'].  Jobs defined in pillar
    cannot be modified.
    '''
    when = data['time']
    time_fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')

    if name in self.opts['schedule']:
        self.opts['schedule'][name].setdefault('skip_explicit', []).append(
            {'time': when, 'time_fmt': time_fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)

    # Broadcast completion together with the refreshed schedule.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'schedule': self._get_schedule()},
        tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Publish the next fire time of job *name* on the event bus, formatted
    with *fmt* (None when unknown).
    '''
    nxt = None
    schedule = self._get_schedule()
    if schedule:
        nxt = schedule.get(name, {}).get('_next_fire_time', None)
        if nxt:
            nxt = nxt.strftime(fmt)

    # Fire the complete event back along with the computed time.
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event(
        {'complete': True, 'next_fire_time': nxt},
        tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the schedule entry for *name*, or an empty dict when absent.
    '''
    return self._get_schedule().get(name, {})
def eval(self, now=None):
    '''
    Evaluate the schedule and execute any jobs that are due.

    Walks every job in the merged schedule, computes/updates its
    ``_next_fire_time`` bookkeeping according to the configured trigger
    (``seconds``/``minutes``/``hours``/``days``, ``once``, ``when`` or
    ``cron``), applies the various run/skip modifiers (``splay``,
    ``range``, ``skip_during_range``, ``skip_explicit``, ``run_explicit``,
    ``after``, ``until``, ``enabled``), and finally dispatches jobs that
    should run via ``self._run_job``.

    :param datetime now: Override current time with a datetime object instance``
    '''
    log.trace('==== evaluating schedule now %s =====', now)

    # Normalize the configured loop interval to a timedelta so it can be
    # compared/added to datetimes below.
    loop_interval = self.opts['loop_interval']
    if not isinstance(loop_interval, datetime.timedelta):
        loop_interval = datetime.timedelta(seconds=loop_interval)

    def _splay(splaytime):
        '''
        Calculate splaytime

        Returns a random number of seconds: within [start, end] when a
        dict is given, otherwise within [1, splaytime].  Returns None on
        an invalid (end < start) dict.
        '''
        splay_ = None
        if isinstance(splaytime, dict):
            if splaytime['end'] >= splaytime['start']:
                splay_ = random.randint(splaytime['start'],
                                        splaytime['end'])
            else:
                log.error('schedule.handle_func: Invalid Splay, '
                          'end must be larger than start. Ignoring splay.')
        else:
            splay_ = random.randint(1, splaytime)
        return splay_

    def _handle_time_elements(data):
        '''
        Handle schedule item with time elements
        seconds, minutes, hours, days

        Computes the total interval in seconds (cached as '_seconds') and
        schedules the first fire time relative to "now".
        '''
        if '_seconds' not in data:
            interval = int(data.get('seconds', 0))
            interval += int(data.get('minutes', 0)) * 60
            interval += int(data.get('hours', 0)) * 3600
            interval += int(data.get('days', 0)) * 86400

            data['_seconds'] = interval

            if not data['_next_fire_time']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

            # Shrink the loop interval so we don't overshoot short jobs.
            if interval < self.loop_interval:
                self.loop_interval = interval
            data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])

    def _handle_once(data, loop_interval):
        '''
        Handle schedule item with once

        Parses the one-shot timestamp (optionally via 'once_fmt') and
        sets '_next_fire_time'; sets '_continue' when the time has
        already passed or is not yet due.
        '''
        if data['_next_fire_time']:
            if data['_next_fire_time'] < now - loop_interval or \
                    data['_next_fire_time'] > now and \
                    not data['_splay']:
                data['_continue'] = True

        if not data['_next_fire_time'] and \
                not data['_splay']:
            once = data['once']
            if not isinstance(once, datetime.datetime):
                once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
                try:
                    once = datetime.datetime.strptime(data['once'],
                                                      once_fmt)
                except (TypeError, ValueError):
                    data['_error'] = ('Date string could not '
                                      'be parsed: {0}, {1}. '
                                      'Ignoring job {2}.'.format(
                                          data['once'],
                                          once_fmt,
                                          data['name']))
                    log.error(data['_error'])
                    return
            data['_next_fire_time'] = once
            data['_next_scheduled_fire_time'] = once
            # If _next_fire_time is less than now, continue
            if once < now - loop_interval:
                data['_continue'] = True

    def _handle_when(data, loop_interval):
        '''
        Handle schedule item with when

        'when' may be a single value or a list; entries may be looked up
        indirectly through pillar/grains "whens" dicts.  Requires
        python-dateutil for string parsing.
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['when'], list):
            _when_data = [data['when']]
        else:
            _when_data = data['when']

        _when = []
        for i in _when_data:
            if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
                    i in self.opts['pillar']['whens']):
                if not isinstance(self.opts['pillar']['whens'],
                                  dict):
                    data['_error'] = ('Pillar item "whens" '
                                      'must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['pillar']['whens'][i]
            elif ('whens' in self.opts['grains'] and
                  i in self.opts['grains']['whens']):
                if not isinstance(self.opts['grains']['whens'],
                                  dict):
                    data['_error'] = ('Grain "whens" must be a dict. '
                                      'Ignoring job {0}.'.format(data['name']))
                    log.error(data['_error'])
                    return
                when_ = self.opts['grains']['whens'][i]
            else:
                when_ = i

            if not isinstance(when_, datetime.datetime):
                try:
                    when_ = dateutil_parser.parse(when_)
                except ValueError:
                    data['_error'] = ('Invalid date string {0}. '
                                      'Ignoring job {1}.'.format(i, data['name']))
                    log.error(data['_error'])
                    return

            _when.append(when_)

        if data['_splay']:
            _when.append(data['_splay'])

        # Sort the list of "whens" from earlier to later schedules
        _when.sort()

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_when):
            if len(_when) > 1:
                if i < now - loop_interval:
                    # Remove all missed schedules except the latest one.
                    # We need it to detect if it was triggered previously.
                    _when.remove(i)

        if _when:
            # Grab the first element, which is the next run time or
            # last scheduled time in the past.
            when = _when[0]

            if when < now - loop_interval and \
                    not data.get('_run', False) and \
                    not data.get('run', False) and \
                    not data['_splay']:
                data['_next_fire_time'] = None
                data['_continue'] = True
                return

            if '_run' not in data:
                # Prevent run of jobs from the past
                data['_run'] = bool(when >= now - loop_interval)

            if not data['_next_fire_time']:
                data['_next_fire_time'] = when
                data['_next_scheduled_fire_time'] = when

            # NOTE: 'run' here is the closure over eval()'s loop-local flag.
            if data['_next_fire_time'] < when and \
                    not run and \
                    not data['_run']:
                data['_next_fire_time'] = when
                data['_run'] = True

        elif not data.get('_run', False):
            data['_next_fire_time'] = None
            data['_continue'] = True

    def _handle_cron(data, loop_interval):
        '''
        Handle schedule item with cron

        Uses croniter to compute the next fire time when none is cached.
        '''
        if not _CRON_SUPPORTED:
            data['_error'] = ('Missing python-croniter. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if data['_next_fire_time'] is None:
            # Get next time frame for a "cron" job if it has been never
            # executed before or already executed in the past.
            try:
                data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
                data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
            except (ValueError, KeyError):
                data['_error'] = ('Invalid cron string. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

            # If next job run is scheduled more than 1 minute ahead and
            # configured loop interval is longer than that, we should
            # shorten it to get our job executed closer to the beginning
            # of desired time.
            interval = (now - data['_next_fire_time']).total_seconds()
            if interval >= 60 and interval < self.loop_interval:
                self.loop_interval = interval

    def _handle_run_explicit(data, loop_interval):
        '''
        Handle schedule item with run_explicit

        Entries are datetimes or {'time': ..., 'time_fmt': ...} dicts;
        sets data['run'] when the earliest remaining entry falls inside
        the current loop window.
        '''
        _run_explicit = []
        for _run_time in data['run_explicit']:
            if isinstance(_run_time, datetime.datetime):
                _run_explicit.append(_run_time)
            else:
                _run_explicit.append(datetime.datetime.strptime(_run_time['time'],
                                                                _run_time['time_fmt']))

        data['run'] = False

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_run_explicit):
            if len(_run_explicit) > 1:
                if i < now - loop_interval:
                    _run_explicit.remove(i)

        if _run_explicit:
            if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
                data['run'] = True
                data['_next_fire_time'] = _run_explicit[0]

    def _handle_skip_explicit(data, loop_interval):
        '''
        Handle schedule item with skip_explicit

        Suppresses the run (or substitutes self.skip_function) when now
        falls on one of the configured skip times.
        '''
        data['run'] = False

        _skip_explicit = []
        for _skip_time in data['skip_explicit']:
            if isinstance(_skip_time, datetime.datetime):
                _skip_explicit.append(_skip_time)
            else:
                _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
                                                                 _skip_time['time_fmt']))

        # Copy the list so we can loop through it
        for i in copy.deepcopy(_skip_explicit):
            if i < now - loop_interval:
                _skip_explicit.remove(i)

        if _skip_explicit:
            if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'skip_explicit'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True

    def _handle_skip_during_range(data, loop_interval):
        '''
        Handle schedule item with skip_during_range

        Skips (or substitutes self.skip_function for) the job while now is
        inside the configured start/end window; optionally queues an
        explicit run immediately after the window via run_after_skip_range.
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['skip_during_range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary. '
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['skip_during_range']['start']
        end = data['skip_during_range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end in '
                                  'skip_during_range. Ignoring '
                                  'job {0}.'.format(data['name']))
                log.error(data['_error'])
                return

        # Check to see if we should run the job immediately
        # after the skip_during_range is over
        if 'run_after_skip_range' in data and \
                data['run_after_skip_range']:
            if 'run_explicit' not in data:
                data['run_explicit'] = []
            # Add a run_explicit for immediately after the
            # skip_during_range ends
            _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
            if _run_immediate not in data['run_explicit']:
                data['run_explicit'].append({'time': _run_immediate,
                                             'time_fmt': '%Y-%m-%dT%H:%M:%S'})

        if end > start:
            if start <= now <= end:
                if self.skip_function:
                    data['run'] = True
                    data['func'] = self.skip_function
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    data['run'] = False
            else:
                data['run'] = True
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger than '
                              'start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_range(data):
        '''
        Handle schedule item with range

        Only runs the job inside (or, with 'invert', outside) the
        configured start/end window.
        '''
        if not _RANGE_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        if not isinstance(data['range'], dict):
            data['_error'] = ('schedule.handle_func: Invalid, range '
                              'must be specified as a dictionary.'
                              'Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])
            return

        start = data['range']['start']
        end = data['range']['end']
        if not isinstance(start, datetime.datetime):
            try:
                start = dateutil_parser.parse(start)
            except ValueError:
                data['_error'] = ('Invalid date string for start. '
                                  'Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        if not isinstance(end, datetime.datetime):
            try:
                end = dateutil_parser.parse(end)
            except ValueError:
                data['_error'] = ('Invalid date string for end.'
                                  ' Ignoring job {0}.'.format(data['name']))
                log.error(data['_error'])
                return
        if end > start:
            if 'invert' in data['range'] and data['range']['invert']:
                if now <= start or now >= end:
                    data['run'] = True
                else:
                    data['_skip_reason'] = 'in_skip_range'
                    data['run'] = False
            else:
                if start <= now <= end:
                    data['run'] = True
                else:
                    if self.skip_function:
                        data['run'] = True
                        data['func'] = self.skip_function
                    else:
                        data['_skip_reason'] = 'not_in_range'
                        data['run'] = False
        else:
            data['_error'] = ('schedule.handle_func: Invalid '
                              'range, end must be larger '
                              'than start. Ignoring job {0}.'.format(data['name']))
            log.error(data['_error'])

    def _handle_after(data):
        '''
        Handle schedule item with after

        The job only runs once the 'after' timestamp has passed.
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        after = data['after']
        if not isinstance(after, datetime.datetime):
            after = dateutil_parser.parse(after)

        if after >= now:
            log.debug(
                'After time has not passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'after_not_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _handle_until(data):
        '''
        Handle schedule item with until

        The job stops running once the 'until' timestamp has passed.
        '''
        if not _WHEN_SUPPORTED:
            data['_error'] = ('Missing python-dateutil. '
                              'Ignoring job {0}'.format(data['name']))
            log.error(data['_error'])
            return

        until = data['until']
        if not isinstance(until, datetime.datetime):
            until = dateutil_parser.parse(until)

        if until <= now:
            log.debug(
                'Until time has passed skipping job: %s.',
                data['name']
            )
            data['_skip_reason'] = 'until_passed'
            data['_skipped_time'] = now
            data['_skipped'] = True
            data['run'] = False
        else:
            data['run'] = True

    def _chop_ms(dt):
        '''
        Remove the microseconds from a datetime object
        '''
        return dt - datetime.timedelta(microseconds=dt.microsecond)

    # presumably merges opts- and pillar-defined schedules — see _get_schedule
    schedule = self._get_schedule()
    if not isinstance(schedule, dict):
        raise ValueError('Schedule must be of type dict.')
    # Global (schedule-wide) settings override the instance defaults.
    if 'skip_function' in schedule:
        self.skip_function = schedule['skip_function']
    if 'skip_during_range' in schedule:
        self.skip_during_range = schedule['skip_during_range']
    if 'enabled' in schedule:
        self.enabled = schedule['enabled']
    if 'splay' in schedule:
        self.splay = schedule['splay']

    _hidden = ['enabled',
               'skip_function',
               'skip_during_range',
               'splay']
    for job, data in six.iteritems(schedule):

        # Skip anything that is a global setting
        if job in _hidden:
            continue

        # Clear these out between runs
        for item in ['_continue',
                     '_error',
                     '_enabled',
                     '_skipped',
                     '_skip_reason',
                     '_skipped_time']:
            if item in data:
                del data[item]
        run = False

        if 'name' in data:
            job_name = data['name']
        else:
            job_name = data['name'] = job

        # NOTE(review): this type check happens AFTER data has been
        # indexed/assigned above; a non-dict value would already have
        # raised before reaching it — consider reordering upstream.
        if not isinstance(data, dict):
            log.error(
                'Scheduled job "%s" should have a dict value, not %s',
                job_name, type(data)
            )
            continue

        # Accept any of the three accepted spellings of the function key.
        if 'function' in data:
            func = data['function']
        elif 'func' in data:
            func = data['func']
        elif 'fun' in data:
            func = data['fun']
        else:
            func = None
        if not isinstance(func, list):
            func = [func]
        for _func in func:
            if _func not in self.functions:
                log.info(
                    'Invalid function: %s in scheduled job %s.',
                    _func, job_name
                )

        if '_next_fire_time' not in data:
            data['_next_fire_time'] = None

        if '_splay' not in data:
            data['_splay'] = None

        if 'run_on_start' in data and \
                data['run_on_start'] and \
                '_run_on_start' not in data:
            data['_run_on_start'] = True

        if not now:
            now = datetime.datetime.now()

        # Used for quick lookups when detecting invalid option
        # combinations.
        schedule_keys = set(data.keys())

        time_elements = ('seconds', 'minutes', 'hours', 'days')
        scheduling_elements = ('when', 'cron', 'once')

        invalid_sched_combos = [
            set(i) for i in itertools.combinations(scheduling_elements, 2)
        ]

        if any(i <= schedule_keys for i in invalid_sched_combos):
            log.error(
                'Unable to use "%s" options together. Ignoring.',
                '", "'.join(scheduling_elements)
            )
            continue

        invalid_time_combos = []
        for item in scheduling_elements:
            all_items = itertools.chain([item], time_elements)
            invalid_time_combos.append(
                set(itertools.combinations(all_items, 2)))

        if any(set(x) <= schedule_keys for x in invalid_time_combos):
            log.error(
                'Unable to use "%s" with "%s" options. Ignoring',
                '", "'.join(time_elements),
                '", "'.join(scheduling_elements)
            )
            continue

        if 'run_explicit' in data:
            _handle_run_explicit(data, loop_interval)
            run = data['run']

        # Dispatch on the trigger type; jobs with no trigger are ignored.
        if True in [True for item in time_elements if item in data]:
            _handle_time_elements(data)
        elif 'once' in data:
            _handle_once(data, loop_interval)
        elif 'when' in data:
            _handle_when(data, loop_interval)
        elif 'cron' in data:
            _handle_cron(data, loop_interval)
        else:
            continue

        # Something told us to continue, so we continue
        if '_continue' in data and data['_continue']:
            continue

        # An error occurred so we bail out
        if '_error' in data and data['_error']:
            continue

        # Seconds until (negative: since) the next fire time, ignoring
        # sub-second noise.
        seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())

        # If there is no job specific splay available,
        # grab the global which defaults to None.
        if 'splay' not in data:
            data['splay'] = self.splay

        if 'splay' in data and data['splay']:
            # Got "splay" configured, make decision to run a job based on that
            if not data['_splay']:
                # Try to add "splay" time only if next job fire time is
                # still in the future. We should trigger job run
                # immediately otherwise.
                splay = _splay(data['splay'])
                if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
                    log.debug('schedule.handle_func: Adding splay of '
                              '%s seconds to next run.', splay)
                    data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
                    if 'when' in data:
                        data['_run'] = True
                else:
                    run = True

            if data['_splay']:
                # The "splay" configuration has been already processed, just use it
                seconds = (data['_splay'] - now).total_seconds()
                if 'when' in data:
                    data['_next_fire_time'] = data['_splay']

        # Decide whether the job is due, per trigger type.
        if '_seconds' in data:
            if seconds <= 0:
                run = True
        elif 'when' in data and data['_run']:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                data['_run'] = False
                run = True
        elif 'cron' in data:
            # Reset next scheduled time because it is in the past now,
            # and we should trigger the job run, then wait for the next one.
            if seconds <= 0:
                data['_next_fire_time'] = None
                run = True
        elif 'once' in data:
            if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
                run = True
        elif seconds == 0:
            run = True

        if '_run_on_start' in data and data['_run_on_start']:
            run = True
            data['_run_on_start'] = False
        elif run:
            if 'range' in data:
                _handle_range(data)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

            # If there is no job specific skip_during_range available,
            # grab the global which defaults to None.
            if 'skip_during_range' not in data and self.skip_during_range:
                data['skip_during_range'] = self.skip_during_range

            if 'skip_during_range' in data and data['skip_during_range']:
                _handle_skip_during_range(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

            if 'skip_explicit' in data:
                _handle_skip_explicit(data, loop_interval)

                # An error occurred so we bail out
                if '_error' in data and data['_error']:
                    continue

                run = data['run']

                # Override the function if passed back
                if 'func' in data:
                    func = data['func']

        if 'until' in data:
            _handle_until(data)

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            run = data['run']

        if 'after' in data:
            _handle_after(data)

            # An error occurred so we bail out
            if '_error' in data and data['_error']:
                continue

            run = data['run']

        # If args is a list and less than the number of functions
        # run is set to False.
        if 'args' in data and isinstance(data['args'], list):
            if len(data['args']) < len(func):
                data['_error'] = ('Number of arguments is less than '
                                  'the number of functions. Ignoring job.')
                log.error(data['_error'])
                run = False

        # If the job item has continue, then we set run to False
        # so the job does not run but we still get the important
        # information calculated, eg. _next_fire_time
        if '_continue' in data and data['_continue']:
            run = False

        # If there is no job specific enabled available,
        # grab the global which defaults to True.
        if 'enabled' not in data:
            data['enabled'] = self.enabled

        # If globally disabled, disable the job
        if not self.enabled:
            data['enabled'] = self.enabled
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        # Job is disabled, set run to False
        if 'enabled' in data and not data['enabled']:
            data['_enabled'] = False
            data['_skip_reason'] = 'disabled'
            data['_skipped_time'] = now
            data['_skipped'] = True
            run = False

        miss_msg = ''
        if seconds < 0:
            miss_msg = ' (runtime missed ' \
                       'by {0} seconds)'.format(abs(seconds))

        try:
            if run:
                # Job is disabled, continue
                if 'enabled' in data and not data['enabled']:
                    log.debug('Job: %s is disabled', job_name)
                    data['_skip_reason'] = 'disabled'
                    data['_skipped_time'] = now
                    data['_skipped'] = True
                    continue

                if 'jid_include' not in data or data['jid_include']:
                    data['jid_include'] = True
                    log.debug('schedule: Job %s was scheduled with jid_include, '
                              'adding to cache (jid_include defaults to True)',
                              job_name)
                    if 'maxrunning' in data:
                        log.debug('schedule: Job %s was scheduled with a max '
                                  'number of %s', job_name, data['maxrunning'])
                    else:
                        log.info('schedule: maxrunning parameter was not specified for '
                                 'job %s, defaulting to 1.', job_name)
                        data['maxrunning'] = 1

                if not self.standalone:
                    data['run'] = run
                    data = self._check_max_running(func,
                                                   data,
                                                   self.opts,
                                                   now)
                    run = data['run']

                # Check run again, just in case _check_max_running
                # set run to False
                if run:
                    log.info('Running scheduled job: %s%s', job_name, miss_msg)
                    self._run_job(func, data)
        finally:
            # Only set _last_run if the job ran
            if run:
                data['_last_run'] = now
            data['_splay'] = None

        # Re-arm interval jobs for the next cycle.
        if '_seconds' in data:
            if self.standalone:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif '_skipped' in data and data['_skipped']:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
            elif run:
                data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _run_job(self, func, data):
    '''
    Dispatch a due schedule job.

    Depending on configuration the job is run inline (when
    ``run_schedule_jobs_in_background`` is False), in a new process
    (``multiprocessing`` True, the default) or in a thread.  ``func`` is
    a list of function names; each gets its own process/thread with its
    own deep copy of ``data``.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return

    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)

    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return

    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}

    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread

        for i, _func in enumerate(func):
            # Each function gets its own copy of the job data; when
            # 'args' is a list it is distributed positionally across the
            # functions.
            _data = copy.deepcopy(data)
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/utils/schedule.py
|
Schedule.eval
|
python
|
def eval(self, now=None):
'''
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
'''
log.trace('==== evaluating schedule now %s =====', now)
loop_interval = self.opts['loop_interval']
if not isinstance(loop_interval, datetime.timedelta):
loop_interval = datetime.timedelta(seconds=loop_interval)
def _splay(splaytime):
'''
Calculate splaytime
'''
splay_ = None
if isinstance(splaytime, dict):
if splaytime['end'] >= splaytime['start']:
splay_ = random.randint(splaytime['start'],
splaytime['end'])
else:
log.error('schedule.handle_func: Invalid Splay, '
'end must be larger than start. Ignoring splay.')
else:
splay_ = random.randint(1, splaytime)
return splay_
def _handle_time_elements(data):
'''
Handle schedule item with time elements
seconds, minutes, hours, days
'''
if '_seconds' not in data:
interval = int(data.get('seconds', 0))
interval += int(data.get('minutes', 0)) * 60
interval += int(data.get('hours', 0)) * 3600
interval += int(data.get('days', 0)) * 86400
data['_seconds'] = interval
if not data['_next_fire_time']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
if interval < self.loop_interval:
self.loop_interval = interval
data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
def _handle_once(data, loop_interval):
'''
Handle schedule item with once
'''
if data['_next_fire_time']:
if data['_next_fire_time'] < now - loop_interval or \
data['_next_fire_time'] > now and \
not data['_splay']:
data['_continue'] = True
if not data['_next_fire_time'] and \
not data['_splay']:
once = data['once']
if not isinstance(once, datetime.datetime):
once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')
try:
once = datetime.datetime.strptime(data['once'],
once_fmt)
except (TypeError, ValueError):
data['_error'] = ('Date string could not '
'be parsed: {0}, {1}. '
'Ignoring job {2}.'.format(
data['once'],
once_fmt,
data['name']))
log.error(data['_error'])
return
data['_next_fire_time'] = once
data['_next_scheduled_fire_time'] = once
# If _next_fire_time is less than now, continue
if once < now - loop_interval:
data['_continue'] = True
def _handle_when(data, loop_interval):
'''
Handle schedule item with when
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['when'], list):
_when_data = [data['when']]
else:
_when_data = data['when']
_when = []
for i in _when_data:
if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and
i in self.opts['pillar']['whens']):
if not isinstance(self.opts['pillar']['whens'],
dict):
data['_error'] = ('Pillar item "whens" '
'must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['pillar']['whens'][i]
elif ('whens' in self.opts['grains'] and
i in self.opts['grains']['whens']):
if not isinstance(self.opts['grains']['whens'],
dict):
data['_error'] = ('Grain "whens" must be a dict. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
when_ = self.opts['grains']['whens'][i]
else:
when_ = i
if not isinstance(when_, datetime.datetime):
try:
when_ = dateutil_parser.parse(when_)
except ValueError:
data['_error'] = ('Invalid date string {0}. '
'Ignoring job {1}.'.format(i, data['name']))
log.error(data['_error'])
return
_when.append(when_)
if data['_splay']:
_when.append(data['_splay'])
# Sort the list of "whens" from earlier to later schedules
_when.sort()
# Copy the list so we can loop through it
for i in copy.deepcopy(_when):
if len(_when) > 1:
if i < now - loop_interval:
# Remove all missed schedules except the latest one.
# We need it to detect if it was triggered previously.
_when.remove(i)
if _when:
# Grab the first element, which is the next run time or
# last scheduled time in the past.
when = _when[0]
if when < now - loop_interval and \
not data.get('_run', False) and \
not data.get('run', False) and \
not data['_splay']:
data['_next_fire_time'] = None
data['_continue'] = True
return
if '_run' not in data:
# Prevent run of jobs from the past
data['_run'] = bool(when >= now - loop_interval)
if not data['_next_fire_time']:
data['_next_fire_time'] = when
data['_next_scheduled_fire_time'] = when
if data['_next_fire_time'] < when and \
not run and \
not data['_run']:
data['_next_fire_time'] = when
data['_run'] = True
elif not data.get('_run', False):
data['_next_fire_time'] = None
data['_continue'] = True
def _handle_cron(data, loop_interval):
'''
Handle schedule item with cron
'''
if not _CRON_SUPPORTED:
data['_error'] = ('Missing python-croniter. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if data['_next_fire_time'] is None:
# Get next time frame for a "cron" job if it has been never
# executed before or already executed in the past.
try:
data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)
except (ValueError, KeyError):
data['_error'] = ('Invalid cron string. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
# If next job run is scheduled more than 1 minute ahead and
# configured loop interval is longer than that, we should
# shorten it to get our job executed closer to the beginning
# of desired time.
interval = (now - data['_next_fire_time']).total_seconds()
if interval >= 60 and interval < self.loop_interval:
self.loop_interval = interval
def _handle_run_explicit(data, loop_interval):
'''
Handle schedule item with run_explicit
'''
_run_explicit = []
for _run_time in data['run_explicit']:
if isinstance(_run_time, datetime.datetime):
_run_explicit.append(_run_time)
else:
_run_explicit.append(datetime.datetime.strptime(_run_time['time'],
_run_time['time_fmt']))
data['run'] = False
# Copy the list so we can loop through it
for i in copy.deepcopy(_run_explicit):
if len(_run_explicit) > 1:
if i < now - loop_interval:
_run_explicit.remove(i)
if _run_explicit:
if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:
data['run'] = True
data['_next_fire_time'] = _run_explicit[0]
def _handle_skip_explicit(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
data['run'] = False
_skip_explicit = []
for _skip_time in data['skip_explicit']:
if isinstance(_skip_time, datetime.datetime):
_skip_explicit.append(_skip_time)
else:
_skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],
_skip_time['time_fmt']))
# Copy the list so we can loop through it
for i in copy.deepcopy(_skip_explicit):
if i < now - loop_interval:
_skip_explicit.remove(i)
if _skip_explicit:
if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'skip_explicit'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_skip_during_range(data, loop_interval):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['skip_during_range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['skip_during_range']['start']
end = data['skip_during_range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end in '
'skip_during_range. Ignoring '
'job {0}.'.format(data['name']))
log.error(data['_error'])
return
# Check to see if we should run the job immediately
# after the skip_during_range is over
if 'run_after_skip_range' in data and \
data['run_after_skip_range']:
if 'run_explicit' not in data:
data['run_explicit'] = []
# Add a run_explicit for immediately after the
# skip_during_range ends
_run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')
if _run_immediate not in data['run_explicit']:
data['run_explicit'].append({'time': _run_immediate,
'time_fmt': '%Y-%m-%dT%H:%M:%S'})
if end > start:
if start <= now <= end:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'in_skip_range'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger than '
'start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_range(data):
'''
Handle schedule item with skip_explicit
'''
if not _RANGE_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(data['range'], dict):
data['_error'] = ('schedule.handle_func: Invalid, range '
'must be specified as a dictionary.'
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
start = data['range']['start']
end = data['range']['end']
if not isinstance(start, datetime.datetime):
try:
start = dateutil_parser.parse(start)
except ValueError:
data['_error'] = ('Invalid date string for start. '
'Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if not isinstance(end, datetime.datetime):
try:
end = dateutil_parser.parse(end)
except ValueError:
data['_error'] = ('Invalid date string for end.'
' Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
return
if end > start:
if 'invert' in data['range'] and data['range']['invert']:
if now <= start or now >= end:
data['run'] = True
else:
data['_skip_reason'] = 'in_skip_range'
data['run'] = False
else:
if start <= now <= end:
data['run'] = True
else:
if self.skip_function:
data['run'] = True
data['func'] = self.skip_function
else:
data['_skip_reason'] = 'not_in_range'
data['run'] = False
else:
data['_error'] = ('schedule.handle_func: Invalid '
'range, end must be larger '
'than start. Ignoring job {0}.'.format(data['name']))
log.error(data['_error'])
def _handle_after(data):
'''
Handle schedule item with after
'''
if not _WHEN_SUPPORTED:
data['_error'] = ('Missing python-dateutil. '
'Ignoring job {0}'.format(data['name']))
log.error(data['_error'])
return
after = data['after']
if not isinstance(after, datetime.datetime):
after = dateutil_parser.parse(after)
if after >= now:
log.debug(
'After time has not passed skipping job: %s.',
data['name']
)
data['_skip_reason'] = 'after_not_passed'
data['_skipped_time'] = now
data['_skipped'] = True
data['run'] = False
else:
data['run'] = True
def _handle_until(data):
    '''
    Handle schedule item with until
    '''
    # Cannot parse the "until" value without python-dateutil; record
    # the error on the job and bail out.
    if not _WHEN_SUPPORTED:
        data['_error'] = 'Missing python-dateutil. Ignoring job {0}'.format(data['name'])
        log.error(data['_error'])
        return

    deadline = data['until']
    if not isinstance(deadline, datetime.datetime):
        deadline = dateutil_parser.parse(deadline)

    # The job may only run while the "until" moment is still ahead of us.
    data['run'] = deadline > now
    if not data['run']:
        log.debug(
            'Until time has passed skipping job: %s.',
            data['name']
        )
        data['_skip_reason'] = 'until_passed'
        data['_skipped_time'] = now
        data['_skipped'] = True
def _chop_ms(dt):
'''
Remove the microseconds from a datetime object
'''
return dt - datetime.timedelta(microseconds=dt.microsecond)
schedule = self._get_schedule()
if not isinstance(schedule, dict):
raise ValueError('Schedule must be of type dict.')
if 'skip_function' in schedule:
self.skip_function = schedule['skip_function']
if 'skip_during_range' in schedule:
self.skip_during_range = schedule['skip_during_range']
if 'enabled' in schedule:
self.enabled = schedule['enabled']
if 'splay' in schedule:
self.splay = schedule['splay']
_hidden = ['enabled',
'skip_function',
'skip_during_range',
'splay']
for job, data in six.iteritems(schedule):
# Skip anything that is a global setting
if job in _hidden:
continue
# Clear these out between runs
for item in ['_continue',
'_error',
'_enabled',
'_skipped',
'_skip_reason',
'_skipped_time']:
if item in data:
del data[item]
run = False
if 'name' in data:
job_name = data['name']
else:
job_name = data['name'] = job
if not isinstance(data, dict):
log.error(
'Scheduled job "%s" should have a dict value, not %s',
job_name, type(data)
)
continue
if 'function' in data:
func = data['function']
elif 'func' in data:
func = data['func']
elif 'fun' in data:
func = data['fun']
else:
func = None
if not isinstance(func, list):
func = [func]
for _func in func:
if _func not in self.functions:
log.info(
'Invalid function: %s in scheduled job %s.',
_func, job_name
)
if '_next_fire_time' not in data:
data['_next_fire_time'] = None
if '_splay' not in data:
data['_splay'] = None
if 'run_on_start' in data and \
data['run_on_start'] and \
'_run_on_start' not in data:
data['_run_on_start'] = True
if not now:
now = datetime.datetime.now()
# Used for quick lookups when detecting invalid option
# combinations.
schedule_keys = set(data.keys())
time_elements = ('seconds', 'minutes', 'hours', 'days')
scheduling_elements = ('when', 'cron', 'once')
invalid_sched_combos = [
set(i) for i in itertools.combinations(scheduling_elements, 2)
]
if any(i <= schedule_keys for i in invalid_sched_combos):
log.error(
'Unable to use "%s" options together. Ignoring.',
'", "'.join(scheduling_elements)
)
continue
invalid_time_combos = []
for item in scheduling_elements:
all_items = itertools.chain([item], time_elements)
invalid_time_combos.append(
set(itertools.combinations(all_items, 2)))
if any(set(x) <= schedule_keys for x in invalid_time_combos):
log.error(
'Unable to use "%s" with "%s" options. Ignoring',
'", "'.join(time_elements),
'", "'.join(scheduling_elements)
)
continue
if 'run_explicit' in data:
_handle_run_explicit(data, loop_interval)
run = data['run']
if True in [True for item in time_elements if item in data]:
_handle_time_elements(data)
elif 'once' in data:
_handle_once(data, loop_interval)
elif 'when' in data:
_handle_when(data, loop_interval)
elif 'cron' in data:
_handle_cron(data, loop_interval)
else:
continue
# Something told us to continue, so we continue
if '_continue' in data and data['_continue']:
continue
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
seconds = int((_chop_ms(data['_next_fire_time']) - _chop_ms(now)).total_seconds())
# If there is no job specific splay available,
# grab the global which defaults to None.
if 'splay' not in data:
data['splay'] = self.splay
if 'splay' in data and data['splay']:
# Got "splay" configured, make decision to run a job based on that
if not data['_splay']:
# Try to add "splay" time only if next job fire time is
# still in the future. We should trigger job run
# immediately otherwise.
splay = _splay(data['splay'])
if now < data['_next_fire_time'] + datetime.timedelta(seconds=splay):
log.debug('schedule.handle_func: Adding splay of '
'%s seconds to next run.', splay)
data['_splay'] = data['_next_fire_time'] + datetime.timedelta(seconds=splay)
if 'when' in data:
data['_run'] = True
else:
run = True
if data['_splay']:
# The "splay" configuration has been already processed, just use it
seconds = (data['_splay'] - now).total_seconds()
if 'when' in data:
data['_next_fire_time'] = data['_splay']
if '_seconds' in data:
if seconds <= 0:
run = True
elif 'when' in data and data['_run']:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
data['_run'] = False
run = True
elif 'cron' in data:
# Reset next scheduled time because it is in the past now,
# and we should trigger the job run, then wait for the next one.
if seconds <= 0:
data['_next_fire_time'] = None
run = True
elif 'once' in data:
if data['_next_fire_time'] <= now <= (data['_next_fire_time'] + loop_interval):
run = True
elif seconds == 0:
run = True
if '_run_on_start' in data and data['_run_on_start']:
run = True
data['_run_on_start'] = False
elif run:
if 'range' in data:
_handle_range(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
# If there is no job specific skip_during_range available,
# grab the global which defaults to None.
if 'skip_during_range' not in data and self.skip_during_range:
data['skip_during_range'] = self.skip_during_range
if 'skip_during_range' in data and data['skip_during_range']:
_handle_skip_during_range(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'skip_explicit' in data:
_handle_skip_explicit(data, loop_interval)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# Override the functiton if passed back
if 'func' in data:
func = data['func']
if 'until' in data:
_handle_until(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
if 'after' in data:
_handle_after(data)
# An error occurred so we bail out
if '_error' in data and data['_error']:
continue
run = data['run']
# If args is a list and less than the number of functions
# run is set to False.
if 'args' in data and isinstance(data['args'], list):
if len(data['args']) < len(func):
data['_error'] = ('Number of arguments is less than '
'the number of functions. Ignoring job.')
log.error(data['_error'])
run = False
# If the job item has continue, then we set run to False
# so the job does not run but we still get the important
# information calculated, eg. _next_fire_time
if '_continue' in data and data['_continue']:
run = False
# If there is no job specific enabled available,
# grab the global which defaults to True.
if 'enabled' not in data:
data['enabled'] = self.enabled
# If globally disabled, disable the job
if not self.enabled:
data['enabled'] = self.enabled
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
# Job is disabled, set run to False
if 'enabled' in data and not data['enabled']:
data['_enabled'] = False
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
run = False
miss_msg = ''
if seconds < 0:
miss_msg = ' (runtime missed ' \
'by {0} seconds)'.format(abs(seconds))
try:
if run:
# Job is disabled, continue
if 'enabled' in data and not data['enabled']:
log.debug('Job: %s is disabled', job_name)
data['_skip_reason'] = 'disabled'
data['_skipped_time'] = now
data['_skipped'] = True
continue
if 'jid_include' not in data or data['jid_include']:
data['jid_include'] = True
log.debug('schedule: Job %s was scheduled with jid_include, '
'adding to cache (jid_include defaults to True)',
job_name)
if 'maxrunning' in data:
log.debug('schedule: Job %s was scheduled with a max '
'number of %s', job_name, data['maxrunning'])
else:
log.info('schedule: maxrunning parameter was not specified for '
'job %s, defaulting to 1.', job_name)
data['maxrunning'] = 1
if not self.standalone:
data['run'] = run
data = self._check_max_running(func,
data,
self.opts,
now)
run = data['run']
# Check run again, just in case _check_max_running
# set run to False
if run:
log.info('Running scheduled job: %s%s', job_name, miss_msg)
self._run_job(func, data)
finally:
# Only set _last_run if the job ran
if run:
data['_last_run'] = now
data['_splay'] = None
if '_seconds' in data:
if self.standalone:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif '_skipped' in data and data['_skipped']:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
elif run:
data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])
|
Evaluate and execute the schedule
:param datetime now: Override current time with a datetime object instance``
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L853-L1637
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def _get_schedule(self,\n include_opts=True,\n include_pillar=True,\n remove_hidden=False):\n '''\n Return the schedule data structure\n '''\n schedule = {}\n if include_pillar:\n pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})\n if not isinstance(pillar_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(pillar_schedule)\n if include_opts:\n opts_schedule = self.opts.get('schedule', {})\n if not isinstance(opts_schedule, dict):\n raise ValueError('Schedule must be of type dict.')\n schedule.update(opts_schedule)\n\n if remove_hidden:\n _schedule = copy.deepcopy(schedule)\n for job in _schedule:\n if isinstance(_schedule[job], dict):\n for item in _schedule[job]:\n if item.startswith('_'):\n del schedule[job][item]\n return schedule\n",
"def _check_max_running(self, func, data, opts, now):\n '''\n Return the schedule data structure\n '''\n # Check to see if there are other jobs with this\n # signature running. If there are more than maxrunning\n # jobs present then don't start another.\n # If jid_include is False for this job we can ignore all this\n # NOTE--jid_include defaults to True, thus if it is missing from the data\n # dict we treat it like it was there and is True\n\n # Check if we're able to run\n if not data['run']:\n return data\n if 'jid_include' not in data or data['jid_include']:\n jobcount = 0\n if self.opts['__role'] == 'master':\n current_jobs = salt.utils.master.get_running_jobs(self.opts)\n else:\n current_jobs = salt.utils.minion.running(self.opts)\n for job in current_jobs:\n if 'schedule' in job:\n log.debug(\n 'schedule.handle_func: Checking job against fun '\n '%s: %s', func, job\n )\n if data['name'] == job['schedule'] \\\n and salt.utils.process.os_is_running(job['pid']):\n jobcount += 1\n log.debug(\n 'schedule.handle_func: Incrementing jobcount, '\n 'now %s, maxrunning is %s',\n jobcount, data['maxrunning']\n )\n if jobcount >= data['maxrunning']:\n log.debug(\n 'schedule.handle_func: The scheduled job '\n '%s was not started, %s already running',\n data['name'], data['maxrunning']\n )\n data['_skip_reason'] = 'maxrunning'\n data['_skipped'] = True\n data['_skipped_time'] = now\n data['run'] = False\n return data\n return data\n",
"def _run_job(self, func, data):\n job_dry_run = data.get('dry_run', False)\n if job_dry_run:\n log.debug('Job %s has \\'dry_run\\' set to True. Not running it.', data['name'])\n return\n\n multiprocessing_enabled = self.opts.get('multiprocessing', True)\n run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)\n\n if run_schedule_jobs_in_background is False:\n # Explicitly pass False for multiprocessing_enabled\n self.handle_func(False, func, data)\n return\n\n if multiprocessing_enabled and salt.utils.platform.is_windows():\n # Temporarily stash our function references.\n # You can't pickle function references, and pickling is\n # required when spawning new processes on Windows.\n functions = self.functions\n self.functions = {}\n returners = self.returners\n self.returners = {}\n utils = self.utils\n self.utils = {}\n\n try:\n if multiprocessing_enabled:\n thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess\n else:\n thread_cls = threading.Thread\n\n for i, _func in enumerate(func):\n _data = copy.deepcopy(data)\n if 'args' in _data and isinstance(_data['args'], list):\n _data['args'] = _data['args'][i]\n\n if multiprocessing_enabled:\n with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):\n proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))\n # Reset current signals before starting the process in\n # order not to inherit the current signal handlers\n proc.start()\n proc.join()\n else:\n proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))\n proc.start()\n finally:\n if multiprocessing_enabled and salt.utils.platform.is_windows():\n # Restore our function references.\n self.functions = functions\n self.returners = returners\n self.utils = utils\n",
"def _splay(splaytime):\n '''\n Calculate splaytime\n '''\n splay_ = None\n if isinstance(splaytime, dict):\n if splaytime['end'] >= splaytime['start']:\n splay_ = random.randint(splaytime['start'],\n splaytime['end'])\n else:\n log.error('schedule.handle_func: Invalid Splay, '\n 'end must be larger than start. Ignoring splay.')\n else:\n splay_ = random.randint(1, splaytime)\n return splay_\n",
"def _handle_time_elements(data):\n '''\n Handle schedule item with time elements\n seconds, minutes, hours, days\n '''\n if '_seconds' not in data:\n interval = int(data.get('seconds', 0))\n interval += int(data.get('minutes', 0)) * 60\n interval += int(data.get('hours', 0)) * 3600\n interval += int(data.get('days', 0)) * 86400\n\n data['_seconds'] = interval\n\n if not data['_next_fire_time']:\n data['_next_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])\n\n if interval < self.loop_interval:\n self.loop_interval = interval\n\n data['_next_scheduled_fire_time'] = now + datetime.timedelta(seconds=data['_seconds'])\n",
"def _handle_once(data, loop_interval):\n '''\n Handle schedule item with once\n '''\n if data['_next_fire_time']:\n if data['_next_fire_time'] < now - loop_interval or \\\n data['_next_fire_time'] > now and \\\n not data['_splay']:\n data['_continue'] = True\n\n if not data['_next_fire_time'] and \\\n not data['_splay']:\n once = data['once']\n if not isinstance(once, datetime.datetime):\n once_fmt = data.get('once_fmt', '%Y-%m-%dT%H:%M:%S')\n try:\n once = datetime.datetime.strptime(data['once'],\n once_fmt)\n except (TypeError, ValueError):\n data['_error'] = ('Date string could not '\n 'be parsed: {0}, {1}. '\n 'Ignoring job {2}.'.format(\n data['once'],\n once_fmt,\n data['name']))\n log.error(data['_error'])\n return\n data['_next_fire_time'] = once\n data['_next_scheduled_fire_time'] = once\n # If _next_fire_time is less than now, continue\n if once < now - loop_interval:\n data['_continue'] = True\n",
"def _handle_when(data, loop_interval):\n '''\n Handle schedule item with when\n '''\n if not _WHEN_SUPPORTED:\n data['_error'] = ('Missing python-dateutil. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if not isinstance(data['when'], list):\n _when_data = [data['when']]\n else:\n _when_data = data['when']\n\n _when = []\n for i in _when_data:\n if ('pillar' in self.opts and 'whens' in self.opts['pillar'] and\n i in self.opts['pillar']['whens']):\n if not isinstance(self.opts['pillar']['whens'],\n dict):\n data['_error'] = ('Pillar item \"whens\" '\n 'must be a dict. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n when_ = self.opts['pillar']['whens'][i]\n elif ('whens' in self.opts['grains'] and\n i in self.opts['grains']['whens']):\n if not isinstance(self.opts['grains']['whens'],\n dict):\n data['_error'] = ('Grain \"whens\" must be a dict. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n when_ = self.opts['grains']['whens'][i]\n else:\n when_ = i\n\n if not isinstance(when_, datetime.datetime):\n try:\n when_ = dateutil_parser.parse(when_)\n except ValueError:\n data['_error'] = ('Invalid date string {0}. 
'\n 'Ignoring job {1}.'.format(i, data['name']))\n log.error(data['_error'])\n return\n\n _when.append(when_)\n\n if data['_splay']:\n _when.append(data['_splay'])\n\n # Sort the list of \"whens\" from earlier to later schedules\n _when.sort()\n\n # Copy the list so we can loop through it\n for i in copy.deepcopy(_when):\n if len(_when) > 1:\n if i < now - loop_interval:\n # Remove all missed schedules except the latest one.\n # We need it to detect if it was triggered previously.\n _when.remove(i)\n\n if _when:\n # Grab the first element, which is the next run time or\n # last scheduled time in the past.\n when = _when[0]\n\n if when < now - loop_interval and \\\n not data.get('_run', False) and \\\n not data.get('run', False) and \\\n not data['_splay']:\n data['_next_fire_time'] = None\n data['_continue'] = True\n return\n\n if '_run' not in data:\n # Prevent run of jobs from the past\n data['_run'] = bool(when >= now - loop_interval)\n\n if not data['_next_fire_time']:\n data['_next_fire_time'] = when\n\n data['_next_scheduled_fire_time'] = when\n\n if data['_next_fire_time'] < when and \\\n not run and \\\n not data['_run']:\n data['_next_fire_time'] = when\n data['_run'] = True\n\n elif not data.get('_run', False):\n data['_next_fire_time'] = None\n data['_continue'] = True\n",
"def _handle_cron(data, loop_interval):\n '''\n Handle schedule item with cron\n '''\n if not _CRON_SUPPORTED:\n data['_error'] = ('Missing python-croniter. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if data['_next_fire_time'] is None:\n # Get next time frame for a \"cron\" job if it has been never\n # executed before or already executed in the past.\n try:\n data['_next_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)\n data['_next_scheduled_fire_time'] = croniter.croniter(data['cron'], now).get_next(datetime.datetime)\n except (ValueError, KeyError):\n data['_error'] = ('Invalid cron string. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n # If next job run is scheduled more than 1 minute ahead and\n # configured loop interval is longer than that, we should\n # shorten it to get our job executed closer to the beginning\n # of desired time.\n interval = (now - data['_next_fire_time']).total_seconds()\n if interval >= 60 and interval < self.loop_interval:\n self.loop_interval = interval\n",
"def _handle_run_explicit(data, loop_interval):\n '''\n Handle schedule item with run_explicit\n '''\n _run_explicit = []\n for _run_time in data['run_explicit']:\n if isinstance(_run_time, datetime.datetime):\n _run_explicit.append(_run_time)\n else:\n _run_explicit.append(datetime.datetime.strptime(_run_time['time'],\n _run_time['time_fmt']))\n data['run'] = False\n\n # Copy the list so we can loop through it\n for i in copy.deepcopy(_run_explicit):\n if len(_run_explicit) > 1:\n if i < now - loop_interval:\n _run_explicit.remove(i)\n\n if _run_explicit:\n if _run_explicit[0] <= now < _run_explicit[0] + loop_interval:\n data['run'] = True\n data['_next_fire_time'] = _run_explicit[0]\n",
"def _handle_skip_explicit(data, loop_interval):\n '''\n Handle schedule item with skip_explicit\n '''\n data['run'] = False\n\n _skip_explicit = []\n for _skip_time in data['skip_explicit']:\n if isinstance(_skip_time, datetime.datetime):\n _skip_explicit.append(_skip_time)\n else:\n _skip_explicit.append(datetime.datetime.strptime(_skip_time['time'],\n _skip_time['time_fmt']))\n\n # Copy the list so we can loop through it\n for i in copy.deepcopy(_skip_explicit):\n if i < now - loop_interval:\n _skip_explicit.remove(i)\n\n if _skip_explicit:\n if _skip_explicit[0] <= now <= (_skip_explicit[0] + loop_interval):\n if self.skip_function:\n data['run'] = True\n data['func'] = self.skip_function\n else:\n data['_skip_reason'] = 'skip_explicit'\n data['_skipped_time'] = now\n data['_skipped'] = True\n data['run'] = False\n else:\n data['run'] = True\n",
"def _handle_skip_during_range(data, loop_interval):\n '''\n Handle schedule item with skip_explicit\n '''\n if not _RANGE_SUPPORTED:\n data['_error'] = ('Missing python-dateutil. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if not isinstance(data['skip_during_range'], dict):\n data['_error'] = ('schedule.handle_func: Invalid, range '\n 'must be specified as a dictionary. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n start = data['skip_during_range']['start']\n end = data['skip_during_range']['end']\n if not isinstance(start, datetime.datetime):\n try:\n start = dateutil_parser.parse(start)\n except ValueError:\n data['_error'] = ('Invalid date string for start in '\n 'skip_during_range. Ignoring '\n 'job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if not isinstance(end, datetime.datetime):\n try:\n end = dateutil_parser.parse(end)\n except ValueError:\n data['_error'] = ('Invalid date string for end in '\n 'skip_during_range. 
Ignoring '\n 'job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n # Check to see if we should run the job immediately\n # after the skip_during_range is over\n if 'run_after_skip_range' in data and \\\n data['run_after_skip_range']:\n if 'run_explicit' not in data:\n data['run_explicit'] = []\n # Add a run_explicit for immediately after the\n # skip_during_range ends\n _run_immediate = (end + loop_interval).strftime('%Y-%m-%dT%H:%M:%S')\n if _run_immediate not in data['run_explicit']:\n data['run_explicit'].append({'time': _run_immediate,\n 'time_fmt': '%Y-%m-%dT%H:%M:%S'})\n\n if end > start:\n if start <= now <= end:\n if self.skip_function:\n data['run'] = True\n data['func'] = self.skip_function\n else:\n data['_skip_reason'] = 'in_skip_range'\n data['_skipped_time'] = now\n data['_skipped'] = True\n data['run'] = False\n else:\n data['run'] = True\n else:\n data['_error'] = ('schedule.handle_func: Invalid '\n 'range, end must be larger than '\n 'start. Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n",
"def _handle_range(data):\n '''\n Handle schedule item with skip_explicit\n '''\n if not _RANGE_SUPPORTED:\n data['_error'] = ('Missing python-dateutil. '\n 'Ignoring job {0}'.format(data['name']))\n log.error(data['_error'])\n return\n\n if not isinstance(data['range'], dict):\n data['_error'] = ('schedule.handle_func: Invalid, range '\n 'must be specified as a dictionary.'\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n start = data['range']['start']\n end = data['range']['end']\n if not isinstance(start, datetime.datetime):\n try:\n start = dateutil_parser.parse(start)\n except ValueError:\n data['_error'] = ('Invalid date string for start. '\n 'Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if not isinstance(end, datetime.datetime):\n try:\n end = dateutil_parser.parse(end)\n except ValueError:\n data['_error'] = ('Invalid date string for end.'\n ' Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n return\n\n if end > start:\n if 'invert' in data['range'] and data['range']['invert']:\n if now <= start or now >= end:\n data['run'] = True\n else:\n data['_skip_reason'] = 'in_skip_range'\n data['run'] = False\n else:\n if start <= now <= end:\n data['run'] = True\n else:\n if self.skip_function:\n data['run'] = True\n data['func'] = self.skip_function\n else:\n data['_skip_reason'] = 'not_in_range'\n data['run'] = False\n else:\n data['_error'] = ('schedule.handle_func: Invalid '\n 'range, end must be larger '\n 'than start. Ignoring job {0}.'.format(data['name']))\n log.error(data['_error'])\n",
"def _handle_after(data):\n '''\n Handle schedule item with after\n '''\n if not _WHEN_SUPPORTED:\n data['_error'] = ('Missing python-dateutil. '\n 'Ignoring job {0}'.format(data['name']))\n log.error(data['_error'])\n return\n\n after = data['after']\n if not isinstance(after, datetime.datetime):\n after = dateutil_parser.parse(after)\n\n if after >= now:\n log.debug(\n 'After time has not passed skipping job: %s.',\n data['name']\n )\n data['_skip_reason'] = 'after_not_passed'\n data['_skipped_time'] = now\n data['_skipped'] = True\n data['run'] = False\n else:\n data['run'] = True\n",
"def _handle_until(data):\n '''\n Handle schedule item with until\n '''\n if not _WHEN_SUPPORTED:\n data['_error'] = ('Missing python-dateutil. '\n 'Ignoring job {0}'.format(data['name']))\n log.error(data['_error'])\n return\n\n until = data['until']\n if not isinstance(until, datetime.datetime):\n until = dateutil_parser.parse(until)\n\n if until <= now:\n log.debug(\n 'Until time has passed skipping job: %s.',\n data['name']\n )\n data['_skip_reason'] = 'until_passed'\n data['_skipped_time'] = now\n data['_skipped'] = True\n data['run'] = False\n else:\n data['run'] = True\n",
"def _chop_ms(dt):\n '''\n Remove the microseconds from a datetime object\n '''\n return dt - datetime.timedelta(microseconds=dt.microsecond)\n"
] |
class Schedule(object):
'''
Create a Schedule object, pass in the opts and the functions dict to use
'''
instance = None
def __new__(cls, opts, functions,
            returners=None,
            intervals=None,
            cleanup=None,
            proxy=None,
            standalone=False,
            new_instance=False,
            utils=None):
    '''
    Only create one instance of Schedule

    The class behaves as a singleton: the first call builds and caches
    the instance; later calls return the cached one.  Passing
    ``new_instance=True`` builds a fresh, un-cached Schedule instead.
    '''
    if cls.instance is None or new_instance is True:
        log.debug('Initializing new Schedule')
        # we need to make a local variable for this, as we are going to store
        # it in a WeakValueDictionary-- which will remove the item if no one
        # references it-- this forces a reference while we return to the caller
        instance = object.__new__(cls)
        # __init__ is intentionally a no-op; all real setup happens here.
        instance.__singleton_init__(opts, functions,
                                    returners=returners,
                                    intervals=intervals,
                                    cleanup=cleanup,
                                    proxy=proxy,
                                    standalone=standalone,
                                    utils=utils)
        if new_instance is True:
            # Do not overwrite the cached singleton with this copy.
            return instance
        cls.instance = instance
    else:
        log.debug('Re-using Schedule')
    return cls.instance
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, functions,
             returners=None,
             intervals=None,
             cleanup=None,
             proxy=None,
             standalone=False,
             new_instance=False,
             utils=None):
    # Intentionally empty: __new__ may return the cached singleton, and
    # Python would re-run __init__ on it, clobbering its state.
    pass
# an init for the singleton instance to call
def __singleton_init__(self, opts,
                       functions,
                       returners=None,
                       intervals=None,
                       cleanup=None,
                       proxy=None,
                       standalone=False,
                       utils=None):
    '''
    Real constructor for the singleton: stores options, execution
    functions, returners and scheduling state, then (unless standalone)
    cleans stale proc files and deletes jobs matching the cleanup
    prefixes.
    '''
    self.opts = opts
    self.proxy = proxy
    self.functions = functions
    # Lazily build the utils loader if the caller did not supply one.
    self.utils = utils or salt.loader.utils(opts)
    self.standalone = standalone
    # Global schedule modifiers; eval() may override these from the
    # schedule data itself.
    self.skip_function = None
    self.skip_during_range = None
    self.splay = None
    self.enabled = True
    if isinstance(intervals, dict):
        self.intervals = intervals
    else:
        self.intervals = {}
    if not self.standalone:
        # Accept anything dict-like for returners; otherwise assume a
        # loader object and generate its function map.
        if hasattr(returners, '__getitem__'):
            self.returners = returners
        else:
            self.returners = returners.loader.gen_functions()
        self.time_offset = self.functions.get('timezone.get_offset', lambda: '0000')()
        self.schedule_returner = self.option('schedule_returner')
    # Keep track of the lowest loop interval needed in this variable
    self.loop_interval = six.MAXSIZE
    if not self.standalone:
        clean_proc_dir(opts)
        if cleanup:
            for prefix in cleanup:
                self.delete_job_prefix(prefix)
def __getnewargs__(self):
    # Arguments handed back to __new__ when an instance is pickled.
    return self.opts, self.functions, self.returners, self.intervals, None
def option(self, opt):
'''
Return options merged from config and pillar
'''
if 'config.merge' in self.functions:
return self.functions['config.merge'](opt, {}, omit_master=True)
return self.opts.get(opt, {})
def _get_schedule(self,
include_opts=True,
include_pillar=True,
remove_hidden=False):
'''
Return the schedule data structure
'''
schedule = {}
if include_pillar:
pillar_schedule = self.opts.get('pillar', {}).get('schedule', {})
if not isinstance(pillar_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(pillar_schedule)
if include_opts:
opts_schedule = self.opts.get('schedule', {})
if not isinstance(opts_schedule, dict):
raise ValueError('Schedule must be of type dict.')
schedule.update(opts_schedule)
if remove_hidden:
_schedule = copy.deepcopy(schedule)
for job in _schedule:
if isinstance(_schedule[job], dict):
for item in _schedule[job]:
if item.startswith('_'):
del schedule[job][item]
return schedule
def _check_max_running(self, func, data, opts, now):
    '''
    Return the schedule data structure

    Vetoes the run (sets ``data['run'] = False`` plus skip bookkeeping)
    when ``maxrunning`` instances of this schedule entry are already
    executing; otherwise returns ``data`` unchanged.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # The running-job inventory comes from a different helper on the
        # master than on a minion.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug(
                    'schedule.handle_func: Checking job against fun '
                    '%s: %s', func, job
                )
                # Count only live processes belonging to this same
                # schedule entry.
                if data['name'] == job['schedule'] \
                        and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug(
                        'schedule.handle_func: Incrementing jobcount, '
                        'now %s, maxrunning is %s',
                        jobcount, data['maxrunning']
                    )
                    # At the cap: mark the job skipped and veto this run.
                    if jobcount >= data['maxrunning']:
                        log.debug(
                            'schedule.handle_func: The scheduled job '
                            '%s was not started, %s already running',
                            data['name'], data['maxrunning']
                        )
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def persist(self):
    '''
    Persist the modified schedule into <<configdir>>/<<default_include>>/_schedule.conf
    '''
    # Work out the configuration directory: explicit conf_dir, the
    # directory of conf_file, or the packaged default, in that order.
    config_dir = self.opts.get('conf_dir', None)
    if config_dir is None and 'conf_file' in self.opts:
        config_dir = os.path.dirname(self.opts['conf_file'])
    if config_dir is None:
        config_dir = salt.syspaths.CONFIG_DIR

    minion_d_dir = os.path.join(
        config_dir,
        os.path.dirname(self.opts.get('default_include',
                                      salt.config.DEFAULT_MINION_OPTS['default_include'])))

    if not os.path.isdir(minion_d_dir):
        os.makedirs(minion_d_dir)

    schedule_conf = os.path.join(minion_d_dir, '_schedule.conf')
    log.debug('Persisting schedule')
    # Only locally-defined jobs are written out; pillar jobs and private
    # bookkeeping keys (leading underscore) are excluded.
    schedule_data = self._get_schedule(include_pillar=False,
                                       remove_hidden=True)
    try:
        with salt.utils.files.fopen(schedule_conf, 'wb+') as fp_:
            fp_.write(
                salt.utils.stringutils.to_bytes(
                    salt.utils.yaml.safe_dump(
                        {'schedule': schedule_data}
                    )
                )
            )
    except (IOError, OSError):
        # Best effort: failure to persist is logged, never raised.
        log.error('Failed to persist the updated schedule',
                  exc_info_on_loglevel=logging.DEBUG)
def delete_job(self, name, persist=True):
    '''
    Deletes a job from the scheduler. Ignore jobs from pillar
    '''
    # ensure job exists, then delete it
    if name in self.opts['schedule']:
        del self.opts['schedule'][name]
    elif name in self._get_schedule(include_opts=False):
        # Pillar-sourced jobs cannot be removed here; warn and move on.
        log.warning("Cannot delete job %s, it's in the pillar!", name)

    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')

    # remove from self.intervals
    if name in self.intervals:
        del self.intervals[name]

    if persist:
        self.persist()
def reset(self):
'''
Reset the scheduler to defaults
'''
self.skip_function = None
self.skip_during_range = None
self.enabled = True
self.splay = None
self.opts['schedule'] = {}
def delete_job_prefix(self, name, persist=True):
    '''
    Remove every scheduled job whose name begins with ``name``.
    Pillar-provided jobs are never removed; a warning is logged for
    each matching one instead.
    '''
    doomed = [job for job in list(self.opts['schedule']) if job.startswith(name)]
    for job in doomed:
        del self.opts['schedule'][job]
    for job in self._get_schedule(include_opts=False):
        if job.startswith(name):
            log.warning("Cannot delete job %s, it's in the pillar!", job)
    # Broadcast the updated schedule over the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_delete_complete')
    # Drop the interval bookkeeping for the removed jobs as well.
    for job in [key for key in list(self.intervals) if key.startswith(name)]:
        del self.intervals[job]
    if persist:
        self.persist()
def add_job(self, data, persist=True):
    '''
    Adds a new job to the scheduler. The format is the same as required in
    the configuration file. See the docs on how YAML is interpreted into
    python data-structures to make sure, you pass correct dictionaries.

    data
        A dict with exactly one key (the job name) mapping to the job
        definition dict.
    persist : True
        Write the updated schedule to disk after adding the job.

    Raises ValueError when ``data`` is not a dict or does not contain
    exactly one job.
    '''
    # we don't do any checking here besides making sure its a dict.
    # eval() already does for us and raises errors accordingly
    if not isinstance(data, dict):
        raise ValueError('Scheduled jobs have to be of type dict.')
    if len(data) != 1:
        raise ValueError('You can only schedule one new job at a time.')
    # if enabled is not included in the job,
    # assume job is enabled.
    for job in data:
        data[job].setdefault('enabled', True)
    new_job = next(iter(data))
    if new_job in self._get_schedule(include_opts=False):
        log.warning("Cannot update job %s, it's in the pillar!", new_job)
    elif new_job in self.opts['schedule']:
        log.info('Updating job settings for scheduled job: %s', new_job)
        self.opts['schedule'].update(data)
    else:
        log.info('Added new job %s to scheduler', new_job)
        self.opts['schedule'].update(data)
    # Fire the complete event back along with updated list of schedule
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_add_complete')
    if persist:
        self.persist()
def enable_job(self, name, persist=True):
    '''
    Mark the named job as enabled.  Pillar-provided jobs cannot be
    toggled and only produce a warning.
    '''
    jobs = self.opts['schedule']
    if name in jobs:
        jobs[name]['enabled'] = True
        log.info('Enabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce the new state on the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_job_complete')
    if persist:
        self.persist()
def disable_job(self, name, persist=True):
    '''
    Mark the named job as disabled.  Pillar-provided jobs cannot be
    toggled and only produce a warning.
    '''
    jobs = self.opts['schedule']
    if name in jobs:
        jobs[name]['enabled'] = False
        log.info('Disabling job %s in scheduler', name)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Announce the new state on the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_job_complete')
    if persist:
        self.persist()
def modify_job(self, name, schedule, persist=True):
    '''
    Modify a job in the scheduler. Ignores jobs from pillar

    name
        Name of the job to replace.
    schedule
        The new job definition stored under ``name``.
    persist : True
        Write the updated schedule to disk afterwards.
    '''
    # ensure job exists, then replace it
    if name in self.opts['schedule']:
        # NOTE(review): delete_job itself persists (and fires a delete
        # event) when persist=True, so modifying an existing job writes
        # the schedule file twice -- confirm this is intended.
        self.delete_job(name, persist)
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
        return
    self.opts['schedule'][name] = schedule
    if persist:
        self.persist()
def run_job(self, name):
    '''
    Run a schedule job now

    Looks the job up in the merged schedule, resolves its function
    under any of the accepted keys ('function', 'func', 'fun') and
    fires it unless the item sets run=False.
    '''
    data = self._get_schedule().get(name, {})
    # The function may be declared under several legacy key names.
    if 'function' in data:
        func = data['function']
    elif 'func' in data:
        func = data['func']
    elif 'fun' in data:
        func = data['fun']
    else:
        func = None
    # Normalize to a list so a single function and a list of functions
    # are handled the same way below.
    if not isinstance(func, list):
        func = [func]
    for _func in func:
        if _func not in self.functions:
            # Only logged -- execution is still attempted below.
            log.error(
                'Invalid function: %s in scheduled job %s.',
                _func, name
            )
        if 'name' not in data:
            data['name'] = name
        log.info('Running Job: %s', name)
        # Grab run, assume True
        run = data.get('run', True)
        if run:
            # NOTE(review): _run_job enumerate()s its first argument and
            # so appears to expect a list; passing the single name
            # ``_func`` here looks suspect -- confirm against _run_job.
            self._run_job(_func, data)
def enable_schedule(self):
    '''
    Switch the scheduler on as a whole and publish the resulting
    schedule on the event bus.
    '''
    self.opts['schedule']['enabled'] = True
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_enabled_complete')
def disable_schedule(self):
    '''
    Switch the scheduler off as a whole and publish the resulting
    schedule on the event bus.
    '''
    self.opts['schedule']['enabled'] = False
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True, 'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_disabled_complete')
def reload(self, schedule):
    '''
    Merge a saved schedule (as loaded from _schedule.conf) back into
    the live configuration, clearing interval bookkeeping first.
    '''
    # Forget any interval bookkeeping from the previous schedule.
    self.intervals = {}
    incoming = schedule
    # Accept either the bare schedule dict or one nested under 'schedule'.
    if 'schedule' in incoming:
        incoming = incoming['schedule']
    if 'schedule' not in self.opts:
        self.opts['schedule'] = {}
    self.opts['schedule'].update(incoming)
def list(self, where):
    '''
    Publish the current schedule items on the event bus.

    where
        'pillar' limits the listing to pillar-provided jobs, 'opts'
        to config-provided jobs; any other value lists both.
    '''
    selectors = {'pillar': {'include_opts': False},
                 'opts': {'include_pillar': False}}
    schedule = self._get_schedule(**selectors.get(where, {}))
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'schedule': schedule},
                   tag='/salt/minion/minion_schedule_list_complete')
def save_schedule(self):
    '''
    Persist the current schedule to disk, then signal completion on
    the event bus.
    '''
    self.persist()
    bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    bus.fire_event({'complete': True},
                   tag='/salt/minion/minion_schedule_saved')
def postpone_job(self, name, data):
    '''
    Postpone one run of a job: record a skip for ``data['time']`` and
    an explicit run at ``data['new_time']``.  Pillar-provided jobs
    cannot be modified.
    '''
    when = data['time']
    new_when = data['new_time']
    fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append({'time': when,
                                                    'time_fmt': fmt})
        job.setdefault('run_explicit', []).append({'time': new_when,
                                                   'time_fmt': fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Publish the updated schedule on the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_postpone_job_complete')
def skip_job(self, name, data):
    '''
    Arrange for the named job to be skipped at ``data['time']``.
    Pillar-provided jobs cannot be modified.
    '''
    when = data['time']
    fmt = data.get('time_fmt', '%Y-%m-%dT%H:%M:%S')
    if name in self.opts['schedule']:
        job = self.opts['schedule'][name]
        job.setdefault('skip_explicit', []).append({'time': when,
                                                    'time_fmt': fmt})
    elif name in self._get_schedule(include_opts=False):
        log.warning("Cannot modify job %s, it's in the pillar!", name)
    # Publish the updated schedule on the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True,
                    'schedule': self._get_schedule()},
                   tag='/salt/minion/minion_schedule_skip_job_complete')
def get_next_fire_time(self, name, fmt='%Y-%m-%dT%H:%M:%S'):
    '''
    Publish the next fire time of job ``name`` (formatted with
    ``fmt``, or None when unknown) on the event bus.
    '''
    schedule = self._get_schedule()
    fire_time = None
    if schedule:
        fire_time = schedule.get(name, {}).get('_next_fire_time', None)
    if fire_time:
        fire_time = fire_time.strftime(fmt)
    # Fire the result back over the event bus.
    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    evt.fire_event({'complete': True, 'next_fire_time': fire_time},
                   tag='/salt/minion/minion_schedule_next_fire_time_complete')
def job_status(self, name):
    '''
    Return the definition of the schedule item ``name``, or an empty
    dict when no such job exists.
    '''
    return self._get_schedule().get(name, {})
def handle_func(self, multiprocessing_enabled, func, data):
    '''
    Execute a single scheduled job, either inline (thread) or in a
    freshly spawned process.

    multiprocessing_enabled
        True when this call runs in a dedicated process; in that case
        the process exits via sys.exit() when finished.
    func
        Name of the function to invoke from ``self.functions``.
    data
        The schedule item definition (args, kwargs, returner,
        metadata, ...).
    '''
    if salt.utils.platform.is_windows() \
            or self.opts.get('transport') == 'zeromq':
        # Since function references can't be pickled and pickling
        # is required when spawning new processes on Windows, regenerate
        # the functions and returners.
        # This is also needed for the ZeroMQ transport to reset all
        # function context data that could keep parent connections:
        # ZeroMQ will hang on polling parent connections from the
        # child process.
        if self.opts['__role'] == 'master':
            self.functions = salt.loader.runner(self.opts, utils=self.utils)
        else:
            self.functions = salt.loader.minion_mods(self.opts, proxy=self.proxy, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
    # Skeleton of the job return document sent back to the master.
    ret = {'id': self.opts.get('id', 'master'),
           'fun': func,
           'fun_args': [],
           'schedule': data['name'],
           'jid': salt.utils.jid.gen_jid(self.opts)}
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
            ret['metadata']['_TOS'] = self.time_offset
            ret['metadata']['_TS'] = time.ctime()
            ret['metadata']['_TT'] = time.strftime('%Y %B %d %a %H %m', time.gmtime())
        else:
            log.warning('schedule: The metadata parameter must be '
                        'specified as a dictionary. Ignoring.')
    if multiprocessing_enabled:
        # We just want to modify the process name if we're on a different process
        salt.utils.process.appendproctitle('{0} {1}'.format(self.__class__.__name__, ret['jid']))
    data_returner = data.get('returner', None)
    if not self.standalone:
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts['cachedir']),
            ret['jid']
        )
    if multiprocessing_enabled and not salt.utils.platform.is_windows():
        # Reconfigure multiprocessing logging after daemonizing
        log_setup.setup_multiprocessing_logging()
    if multiprocessing_enabled:
        # Daemonize *before* entering the try block so the finally
        # section does not run once per fork level.
        salt.utils.process.daemonize_if(self.opts)
    # TODO: Make it readable! Split to funcs, remove nested try-except-finally sections.
    try:
        minion_blackout_violation = False
        if self.opts.get('pillar', {}).get('minion_blackout', False):
            whitelist = self.opts.get('pillar', {}).get('minion_blackout_whitelist', [])
            # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        elif self.opts.get('grains', {}).get('minion_blackout', False):
            whitelist = self.opts.get('grains', {}).get('minion_blackout_whitelist', [])
            if func != 'saltutil.refresh_pillar' and func not in whitelist:
                minion_blackout_violation = True
        if minion_blackout_violation:
            raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                      'to False in pillar or grains to resume operations. Only '
                                      'saltutil.refresh_pillar allowed in blackout mode.')
        ret['pid'] = os.getpid()
        if not self.standalone:
            if 'jid_include' not in data or data['jid_include']:
                log.debug(
                    'schedule.handle_func: adding this job to the '
                    'jobcache with data %s', ret
                )
                # write this to /var/cache/salt/minion/proc
                with salt.utils.files.fopen(proc_fn, 'w+b') as fp_:
                    fp_.write(salt.payload.Serial(self.opts).dumps(ret))
        args = tuple()
        if 'args' in data:
            args = data['args']
            ret['fun_args'].extend(data['args'])
        kwargs = {}
        if 'kwargs' in data:
            kwargs = data['kwargs']
            ret['fun_args'].append(copy.deepcopy(kwargs))
        if func not in self.functions:
            ret['return'] = self.functions.missing_fun_string(func)
            salt.utils.error.raise_error(
                message=self.functions.missing_fun_string(func))
        # if the func support **kwargs, lets pack in the pub data we have
        # TODO: pack the *same* pub data as a minion?
        argspec = salt.utils.args.get_function_argspec(self.functions[func])
        if argspec.keywords:
            # this function accepts **kwargs, pack in the publish data
            for key, val in six.iteritems(ret):
                # Value comparison, not identity: the original used
                # ``is not 'kwargs'``, which only worked because CPython
                # interns short string literals.
                if key != 'kwargs':
                    kwargs['__pub_{0}'.format(key)] = copy.deepcopy(val)
        # Only include these when running runner modules
        if self.opts['__role'] == 'master':
            jid = salt.utils.jid.gen_jid(self.opts)
            tag = salt.utils.event.tagify(jid, prefix='salt/scheduler/')
            event = salt.utils.event.get_event(
                self.opts['__role'],
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False)
            namespaced_event = salt.utils.event.NamespacedEvent(
                event,
                tag,
                print_func=None
            )
            func_globals = {
                '__jid__': jid,
                '__user__': salt.utils.user.get_user(),
                '__tag__': tag,
                '__jid_event__': weakref.proxy(namespaced_event),
            }
            self_functions = copy.copy(self.functions)
            salt.utils.lazy.verify_fun(self_functions, func)
            # Inject some useful globals to *all* the function's global
            # namespace only once per module-- not per func
            completed_funcs = []
            for mod_name in six.iterkeys(self_functions):
                if '.' not in mod_name:
                    continue
                mod, _ = mod_name.split('.', 1)
                if mod in completed_funcs:
                    continue
                completed_funcs.append(mod)
                for global_key, value in six.iteritems(func_globals):
                    self.functions[mod_name].__globals__[global_key] = value
        self.functions.pack['__context__']['retcode'] = 0
        ret['return'] = self.functions[func](*args, **kwargs)
        if not self.standalone:
            # runners do not provide retcode
            if 'retcode' in self.functions.pack['__context__']:
                ret['retcode'] = self.functions.pack['__context__']['retcode']
            ret['success'] = True
            if data_returner or self.schedule_returner:
                if 'return_config' in data:
                    ret['ret_config'] = data['return_config']
                if 'return_kwargs' in data:
                    ret['ret_kwargs'] = data['return_kwargs']
                rets = []
                for returner in [data_returner, self.schedule_returner]:
                    if isinstance(returner, six.string_types):
                        rets.append(returner)
                    elif isinstance(returner, list):
                        rets.extend(returner)
                # simple de-duplication with order retained
                for returner in OrderedDict.fromkeys(rets):
                    ret_str = '{0}.returner'.format(returner)
                    if ret_str in self.returners:
                        self.returners[ret_str](ret)
                    else:
                        log.info(
                            'Job %s using invalid returner: %s. Ignoring.',
                            func, returner
                        )
    except Exception:
        log.exception('Unhandled exception running %s', ret['fun'])
        # Although catch-all exception handlers are bad, the exception here
        # is to let the exception bubble up to the top of the thread context,
        # where the thread will die silently, which is worse.
        if 'return' not in ret:
            ret['return'] = "Unhandled exception running {0}".format(ret['fun'])
        ret['success'] = False
        ret['retcode'] = 254
    finally:
        # Only attempt to return data to the master if the scheduled job is running
        # on a master itself or a minion.
        if '__role' in self.opts and self.opts['__role'] in ('master', 'minion'):
            # The 'return_job' option is enabled by default even if not set
            if 'return_job' in data and not data['return_job']:
                pass
            else:
                # Send back to master so the job is included in the job list
                mret = ret.copy()
                # No returners defined, so we're only sending back to the master
                if not data_returner and not self.schedule_returner:
                    mret['jid'] = 'req'
                    if data.get('return_job') == 'nocache':
                        # overwrite 'req' to signal to master that
                        # this job shouldn't be stored
                        mret['jid'] = 'nocache'
                load = {'cmd': '_return', 'id': self.opts['id']}
                for key, value in six.iteritems(mret):
                    load[key] = value
                if '__role' in self.opts and self.opts['__role'] == 'minion':
                    event = salt.utils.event.get_event('minion',
                                                       opts=self.opts,
                                                       listen=False)
                elif '__role' in self.opts and self.opts['__role'] == 'master':
                    event = salt.utils.event.get_master_event(self.opts,
                                                              self.opts['sock_dir'])
                try:
                    event.fire_event(load, '__schedule_return')
                except Exception:
                    log.exception('Unhandled exception firing __schedule_return event')
        if not self.standalone:
            log.debug('schedule.handle_func: Removing %s', proc_fn)
            try:
                os.unlink(proc_fn)
            except OSError as exc:
                if exc.errno == errno.EEXIST or exc.errno == errno.ENOENT:
                    # EEXIST and ENOENT are OK because the file is gone and that's what
                    # we wanted
                    pass
                else:
                    log.error("Failed to delete '%s': %s", proc_fn, exc.errno)
                    # Otherwise, failing to delete this file is not something
                    # we can cleanly handle.
                    raise
            finally:
                if multiprocessing_enabled:
                    # Let's make sure we exit the process!
                    sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def _run_job(self, func, data):
    '''
    Dispatch a scheduled job to handle_func, honoring dry_run,
    multiprocessing and the run_schedule_jobs_in_background option.

    func
        Iterable of function names to run; each entry gets its own
        process or thread (see the enumerate() below).  A bare string
        would be iterated per character -- assumed to be a list;
        TODO confirm against callers.
    data
        The schedule item definition.
    '''
    job_dry_run = data.get('dry_run', False)
    if job_dry_run:
        log.debug('Job %s has \'dry_run\' set to True. Not running it.', data['name'])
        return
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    run_schedule_jobs_in_background = self.opts.get('run_schedule_jobs_in_background', True)
    if run_schedule_jobs_in_background is False:
        # Explicitly pass False for multiprocessing_enabled
        self.handle_func(False, func, data)
        return
    if multiprocessing_enabled and salt.utils.platform.is_windows():
        # Temporarily stash our function references.
        # You can't pickle function references, and pickling is
        # required when spawning new processes on Windows.
        functions = self.functions
        self.functions = {}
        returners = self.returners
        self.returners = {}
        utils = self.utils
        self.utils = {}
    try:
        if multiprocessing_enabled:
            thread_cls = salt.utils.process.SignalHandlingMultiprocessingProcess
        else:
            thread_cls = threading.Thread
        for i, _func in enumerate(func):
            # Each function gets its own deep copy of the job data so
            # per-function mutations don't leak between runs.
            _data = copy.deepcopy(data)
            # When 'args' is a list it is treated as per-function
            # positional args: entry i belongs to function i.
            if 'args' in _data and isinstance(_data['args'], list):
                _data['args'] = _data['args'][i]
            if multiprocessing_enabled:
                with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
                    proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                    # Reset current signals before starting the process in
                    # order not to inherit the current signal handlers
                    proc.start()
                proc.join()
            else:
                proc = thread_cls(target=self.handle_func, args=(multiprocessing_enabled, _func, _data))
                proc.start()
    finally:
        if multiprocessing_enabled and salt.utils.platform.is_windows():
            # Restore our function references.
            self.functions = functions
            self.returners = returners
            self.utils = utils
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
_run_aws
|
python
|
def _run_aws(cmd, region, opts, user, **kwargs):
    '''
    Shell out to the AWS CLI and return its parsed JSON output
    (or '' when the command produced nothing).

    cmd
        Command to run
    region
        Region to execute cmd in
    opts
        Pass in from salt
    user
        Pass in from salt
    kwargs
        Key-value arguments to pass to the command
    '''
    # Some CLI flag names are not valid python identifiers, so they
    # arrive under pythonic aliases and are renamed here.
    receipt = kwargs.pop('receipthandle', None)
    if receipt:
        kwargs['receipt-handle'] = receipt
    max_msgs = kwargs.pop('num', None)
    if max_msgs:
        kwargs['max-number-of-messages'] = max_msgs
    flags = ' '.join(
        '--{0} "{1}"'.format(key, val) for key, val in six.iteritems(kwargs))
    full_cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
        cmd=cmd,
        args=flags,
        region=_region(region),
        out=_OUTPUT)
    output = __salt__['cmd.run'](full_cmd, runas=user, python_shell=False)
    return salt.utils.json.loads(output) if output else ''
|
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L34-L68
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n",
"def _region(region):\n '''\n Return the region argument.\n '''\n return ' --region {r}'.format(r=region)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
    '''
    Only load this module when the AWS CLI binary is on the PATH.
    '''
    if salt.utils.path.which('aws') is None:
        return (False, 'The module aws_sqs could not be loaded: aws command not found')
    # awscli is installed, load the module
    return True
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def receive_message(queue, region, num=1, opts=None, user=None):
    '''
    Receive one or more messages from a queue in a region

    queue
        The name of the queue to receive messages from

    region
        Region where SQS queues exists

    num : 1
        The max number of messages to receive

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' aws_sqs.receive_message <sqs queue> <region>
        salt '*' aws_sqs.receive_message <sqs queue> <region> num=10

    .. versionadded:: 2014.7.0
    '''
    ret = {
        'Messages': None,
    }
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    if queue not in url_map:
        log.info('"%s" queue does not exist.', queue)
        return ret
    out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
                   num=num)
    # The AWS CLI omits the 'Messages' key entirely when the queue is
    # empty (and _run_aws returns '' for empty output), so guard the
    # lookup instead of indexing directly -- indexing raised KeyError.
    if out:
        ret['Messages'] = out.get('Messages')
    return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
    '''
    Delete one or more messages from a queue in a region

    queue
        The name of the queue to delete messages from

    region
        Region where SQS queues exists

    receipthandle
        The ReceiptHandle of the message to delete. The ReceiptHandle
        is obtained in the return from receive_message

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'

    .. versionadded:: 2014.7.0
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    if queue not in url_map:
        log.info('"%s" queue does not exist.', queue)
        return False
    # delete-message produces no JSON on success, so the CLI output is
    # intentionally discarded (the previous unused local is removed).
    _run_aws('delete-message', region, opts, user,
             receipthandle=receipthandle, queue=url_map[queue])
    return True
def list_queues(region, opts=None, user=None):
    '''
    List the queues in the selected region.

    region
        Region to list SQS queues for

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.list_queues <region>

    '''
    out = _run_aws('list-queues', region, opts, user)
    # The AWS CLI omits 'QueueUrls' entirely (and _run_aws returns '')
    # when the region has no queues; normalize that to an empty list so
    # consumers such as _parse_queue_list always get an iterable instead
    # of hitting a KeyError.
    ret = {
        'retcode': 0,
        'stdout': out.get('QueueUrls', []) if out else [],
    }
    return ret
def create_queue(name, region, opts=None, user=None):
    '''
    Creates a queue with the correct name.

    name
        Name of the SQS queue to create

    region
        Region to create the SQS queue in

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.create_queue <sqs queue> <region>
    '''
    # 'queue-name' is not a valid python identifier, so pass it via **.
    out = _run_aws('create-queue', region=region, opts=opts, user=user,
                   **{'queue-name': name})
    return {
        'retcode': 0,
        'stdout': out['QueueUrl'],
        'stderr': '',
    }
def delete_queue(name, region, opts=None, user=None):
    '''
    Deletes a queue in the region.

    name
        Name of the SQS queue to deletes

    region
        Name of the region to delete the queue from

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    log.debug('map %s', url_map)
    if name in url_map:
        delete = {'queue-url': url_map[name]}
        # delete-queue produces no JSON on success; the previously
        # captured (and unused) CLI output is discarded.
        _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)
    else:
        out = ''
        err = "Delete failed"
        success = False
    ret = {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
    return ret
def queue_exists(name, region, opts=None, user=None):
    '''
    Returns True or False on whether the queue exists in the region

    name
        Name of the SQS queue to search for

    region
        Name of the region to search for the queue in

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.queue_exists <sqs queue> <region>
    '''
    listing = list_queues(region, opts, user)
    return name in _parse_queue_list(listing)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
receive_message
|
python
|
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
|
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L71-L112
|
[
"def _run_aws(cmd, region, opts, user, **kwargs):\n '''\n Runs the given command against AWS.\n cmd\n Command to run\n region\n Region to execute cmd in\n opts\n Pass in from salt\n user\n Pass in from salt\n kwargs\n Key-value arguments to pass to the command\n '''\n # These args need a specific key value that aren't\n # valid python parameter keys\n receipthandle = kwargs.pop('receipthandle', None)\n if receipthandle:\n kwargs['receipt-handle'] = receipthandle\n num = kwargs.pop('num', None)\n if num:\n kwargs['max-number-of-messages'] = num\n\n _formatted_args = [\n '--{0} \"{1}\"'.format(k, v) for k, v in six.iteritems(kwargs)]\n\n cmd = 'aws sqs {cmd} {args} {region} {out}'.format(\n cmd=cmd,\n args=' '.join(_formatted_args),\n region=_region(region),\n out=_OUTPUT)\n\n rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)\n\n return salt.utils.json.loads(rtn) if rtn else ''\n",
"def list_queues(region, opts=None, user=None):\n '''\n List the queues in the selected region.\n\n region\n Region to list SQS queues for\n\n opts : None\n Any additional options to add to the command line\n\n user : None\n Run hg as a user other than what the minion runs as\n\n CLI Example:\n\n salt '*' aws_sqs.list_queues <region>\n\n '''\n out = _run_aws('list-queues', region, opts, user)\n\n ret = {\n 'retcode': 0,\n 'stdout': out['QueueUrls'],\n }\n return ret\n",
"def _parse_queue_list(list_output):\n '''\n Parse the queue to get a dict of name -> URL\n '''\n queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])\n return queues\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
delete_message
|
python
|
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
|
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L115-L152
|
[
"def _run_aws(cmd, region, opts, user, **kwargs):\n '''\n Runs the given command against AWS.\n cmd\n Command to run\n region\n Region to execute cmd in\n opts\n Pass in from salt\n user\n Pass in from salt\n kwargs\n Key-value arguments to pass to the command\n '''\n # These args need a specific key value that aren't\n # valid python parameter keys\n receipthandle = kwargs.pop('receipthandle', None)\n if receipthandle:\n kwargs['receipt-handle'] = receipthandle\n num = kwargs.pop('num', None)\n if num:\n kwargs['max-number-of-messages'] = num\n\n _formatted_args = [\n '--{0} \"{1}\"'.format(k, v) for k, v in six.iteritems(kwargs)]\n\n cmd = 'aws sqs {cmd} {args} {region} {out}'.format(\n cmd=cmd,\n args=' '.join(_formatted_args),\n region=_region(region),\n out=_OUTPUT)\n\n rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)\n\n return salt.utils.json.loads(rtn) if rtn else ''\n",
"def list_queues(region, opts=None, user=None):\n '''\n List the queues in the selected region.\n\n region\n Region to list SQS queues for\n\n opts : None\n Any additional options to add to the command line\n\n user : None\n Run hg as a user other than what the minion runs as\n\n CLI Example:\n\n salt '*' aws_sqs.list_queues <region>\n\n '''\n out = _run_aws('list-queues', region, opts, user)\n\n ret = {\n 'retcode': 0,\n 'stdout': out['QueueUrls'],\n }\n return ret\n",
"def _parse_queue_list(list_output):\n '''\n Parse the queue to get a dict of name -> URL\n '''\n queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])\n return queues\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
list_queues
|
python
|
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
|
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L155-L179
|
[
"def _run_aws(cmd, region, opts, user, **kwargs):\n '''\n Runs the given command against AWS.\n cmd\n Command to run\n region\n Region to execute cmd in\n opts\n Pass in from salt\n user\n Pass in from salt\n kwargs\n Key-value arguments to pass to the command\n '''\n # These args need a specific key value that aren't\n # valid python parameter keys\n receipthandle = kwargs.pop('receipthandle', None)\n if receipthandle:\n kwargs['receipt-handle'] = receipthandle\n num = kwargs.pop('num', None)\n if num:\n kwargs['max-number-of-messages'] = num\n\n _formatted_args = [\n '--{0} \"{1}\"'.format(k, v) for k, v in six.iteritems(kwargs)]\n\n cmd = 'aws sqs {cmd} {args} {region} {out}'.format(\n cmd=cmd,\n args=' '.join(_formatted_args),\n region=_region(region),\n out=_OUTPUT)\n\n rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)\n\n return salt.utils.json.loads(rtn) if rtn else ''\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
create_queue
|
python
|
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
|
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L182-L214
|
[
"def _run_aws(cmd, region, opts, user, **kwargs):\n '''\n Runs the given command against AWS.\n cmd\n Command to run\n region\n Region to execute cmd in\n opts\n Pass in from salt\n user\n Pass in from salt\n kwargs\n Key-value arguments to pass to the command\n '''\n # These args need a specific key value that aren't\n # valid python parameter keys\n receipthandle = kwargs.pop('receipthandle', None)\n if receipthandle:\n kwargs['receipt-handle'] = receipthandle\n num = kwargs.pop('num', None)\n if num:\n kwargs['max-number-of-messages'] = num\n\n _formatted_args = [\n '--{0} \"{1}\"'.format(k, v) for k, v in six.iteritems(kwargs)]\n\n cmd = 'aws sqs {cmd} {args} {region} {out}'.format(\n cmd=cmd,\n args=' '.join(_formatted_args),\n region=_region(region),\n out=_OUTPUT)\n\n rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)\n\n return salt.utils.json.loads(rtn) if rtn else ''\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
delete_queue
|
python
|
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
|
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L217-L264
|
[
"def _run_aws(cmd, region, opts, user, **kwargs):\n '''\n Runs the given command against AWS.\n cmd\n Command to run\n region\n Region to execute cmd in\n opts\n Pass in from salt\n user\n Pass in from salt\n kwargs\n Key-value arguments to pass to the command\n '''\n # These args need a specific key value that aren't\n # valid python parameter keys\n receipthandle = kwargs.pop('receipthandle', None)\n if receipthandle:\n kwargs['receipt-handle'] = receipthandle\n num = kwargs.pop('num', None)\n if num:\n kwargs['max-number-of-messages'] = num\n\n _formatted_args = [\n '--{0} \"{1}\"'.format(k, v) for k, v in six.iteritems(kwargs)]\n\n cmd = 'aws sqs {cmd} {args} {region} {out}'.format(\n cmd=cmd,\n args=' '.join(_formatted_args),\n region=_region(region),\n out=_OUTPUT)\n\n rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)\n\n return salt.utils.json.loads(rtn) if rtn else ''\n",
"def list_queues(region, opts=None, user=None):\n '''\n List the queues in the selected region.\n\n region\n Region to list SQS queues for\n\n opts : None\n Any additional options to add to the command line\n\n user : None\n Run hg as a user other than what the minion runs as\n\n CLI Example:\n\n salt '*' aws_sqs.list_queues <region>\n\n '''\n out = _run_aws('list-queues', region, opts, user)\n\n ret = {\n 'retcode': 0,\n 'stdout': out['QueueUrls'],\n }\n return ret\n",
"def _parse_queue_list(list_output):\n '''\n Parse the queue to get a dict of name -> URL\n '''\n queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])\n return queues\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
queue_exists
|
python
|
def queue_exists(name, region, opts=None, user=None):
'''
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
'''
output = list_queues(region, opts, user)
return name in _parse_queue_list(output)
|
Returns True or False on whether the queue exists in the region
name
Name of the SQS queue to search for
region
Name of the region to search for the queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.queue_exists <sqs queue> <region>
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L267-L290
|
[
"def list_queues(region, opts=None, user=None):\n '''\n List the queues in the selected region.\n\n region\n Region to list SQS queues for\n\n opts : None\n Any additional options to add to the command line\n\n user : None\n Run hg as a user other than what the minion runs as\n\n CLI Example:\n\n salt '*' aws_sqs.list_queues <region>\n\n '''\n out = _run_aws('list-queues', region, opts, user)\n\n ret = {\n 'retcode': 0,\n 'stdout': out['QueueUrls'],\n }\n return ret\n",
"def _parse_queue_list(list_output):\n '''\n Parse the queue to get a dict of name -> URL\n '''\n queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])\n return queues\n"
] |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
'''
Runs the given command against AWS.
cmd
Command to run
region
Region to execute cmd in
opts
Pass in from salt
user
Pass in from salt
kwargs
Key-value arguments to pass to the command
'''
# These args need a specific key value that aren't
# valid python parameter keys
receipthandle = kwargs.pop('receipthandle', None)
if receipthandle:
kwargs['receipt-handle'] = receipthandle
num = kwargs.pop('num', None)
if num:
kwargs['max-number-of-messages'] = num
_formatted_args = [
'--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
cmd=cmd,
args=' '.join(_formatted_args),
region=_region(region),
out=_OUTPUT)
rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
'''
Receive one or more messages from a queue in a region
queue
The name of the queue to receive messages from
region
Region where SQS queues exists
num : 1
The max number of messages to receive
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.receive_message <sqs queue> <region>
salt '*' aws_sqs.receive_message <sqs queue> <region> num=10
.. versionadded:: 2014.7.0
'''
ret = {
'Messages': None,
}
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return ret
out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
num=num)
ret['Messages'] = out['Messages']
return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
'''
Delete one or more messages from a queue in a region
queue
The name of the queue to delete messages from
region
Region where SQS queues exists
receipthandle
The ReceiptHandle of the message to delete. The ReceiptHandle
is obtained in the return from receive_message
opts : None
Any additional options to add to the command line
user : None
Run as a user other than what the minion runs as
CLI Example:
.. code-block:: bash
salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'
.. versionadded:: 2014.7.0
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
if queue not in url_map:
log.info('"%s" queue does not exist.', queue)
return False
out = _run_aws('delete-message', region, opts, user,
receipthandle=receipthandle, queue=url_map[queue],)
return True
def list_queues(region, opts=None, user=None):
'''
List the queues in the selected region.
region
Region to list SQS queues for
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.list_queues <region>
'''
out = _run_aws('list-queues', region, opts, user)
ret = {
'retcode': 0,
'stdout': out['QueueUrls'],
}
return ret
def create_queue(name, region, opts=None, user=None):
'''
Creates a queue with the correct name.
name
Name of the SQS queue to create
region
Region to create the SQS queue in
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.create_queue <sqs queue> <region>
'''
create = {'queue-name': name}
out = _run_aws(
'create-queue', region=region, opts=opts,
user=user, **create)
ret = {
'retcode': 0,
'stdout': out['QueueUrl'],
'stderr': '',
}
return ret
def delete_queue(name, region, opts=None, user=None):
'''
Deletes a queue in the region.
name
Name of the SQS queue to deletes
region
Name of the region to delete the queue from
opts : None
Any additional options to add to the command line
user : None
Run hg as a user other than what the minion runs as
CLI Example:
salt '*' aws_sqs.delete_queue <sqs queue> <region>
'''
queues = list_queues(region, opts, user)
url_map = _parse_queue_list(queues)
log.debug('map %s', url_map)
if name in url_map:
delete = {'queue-url': url_map[name]}
rtn = _run_aws(
'delete-queue',
region=region,
opts=opts,
user=user,
**delete)
success = True
err = ''
out = '{0} deleted'.format(name)
else:
out = ''
err = "Delete failed"
success = False
ret = {
'retcode': 0 if success else 1,
'stdout': out,
'stderr': err,
}
return ret
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
saltstack/salt
|
salt/modules/aws_sqs.py
|
_parse_queue_list
|
python
|
def _parse_queue_list(list_output):
'''
Parse the queue to get a dict of name -> URL
'''
queues = dict((q.split('/')[-1], q) for q in list_output['stdout'])
return queues
|
Parse the queue to get a dict of name -> URL
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aws_sqs.py#L293-L298
| null |
# -*- coding: utf-8 -*-
'''
Support for the Amazon Simple Queue Service.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import salt libs
import salt.utils.json
import salt.utils.path
from salt.ext import six
log = logging.getLogger(__name__)
_OUTPUT = '--output json'
def __virtual__():
if salt.utils.path.which('aws'):
# awscli is installed, load the module
return True
return (False, 'The module aws_sqs could not be loaded: aws command not found')
def _region(region):
'''
Return the region argument.
'''
return ' --region {r}'.format(r=region)
def _run_aws(cmd, region, opts, user, **kwargs):
    '''
    Runs the given command against AWS.

    Builds an ``aws sqs <cmd> ...`` command line from the keyword
    arguments, runs it via ``cmd.run``, and returns the parsed JSON
    output -- or ``''`` when the CLI printed nothing.

    cmd
        Command to run
    region
        Region to execute cmd in
    opts
        Pass in from salt
    user
        Pass in from salt
    kwargs
        Key-value arguments to pass to the command
    '''
    # These args need a specific key value that aren't
    # valid python parameter keys
    # ('receipt-handle' / 'max-number-of-messages' contain dashes).
    receipthandle = kwargs.pop('receipthandle', None)
    if receipthandle:
        kwargs['receipt-handle'] = receipthandle
    num = kwargs.pop('num', None)
    if num:
        kwargs['max-number-of-messages'] = num
    # Render every remaining kwarg as a quoted --key "value" pair.
    _formatted_args = [
        '--{0} "{1}"'.format(k, v) for k, v in six.iteritems(kwargs)]
    # _OUTPUT forces '--output json' so the result is machine-parseable.
    cmd = 'aws sqs {cmd} {args} {region} {out}'.format(
        cmd=cmd,
        args=' '.join(_formatted_args),
        region=_region(region),
        out=_OUTPUT)
    # python_shell=False: the string is split by cmd.run, not a shell.
    rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
    # Some sub-commands (e.g. delete-message) print nothing on success;
    # in that case return '' instead of failing to parse JSON.
    return salt.utils.json.loads(rtn) if rtn else ''
def receive_message(queue, region, num=1, opts=None, user=None):
    '''
    Receive one or more messages from a queue in a region

    queue
        The name of the queue to receive messages from

    region
        Region where SQS queues exists

    num : 1
        The max number of messages to receive

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' aws_sqs.receive_message <sqs queue> <region>
        salt '*' aws_sqs.receive_message <sqs queue> <region> num=10

    .. versionadded:: 2014.7.0
    '''
    ret = {
        'Messages': None,
    }
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    if queue not in url_map:
        log.info('"%s" queue does not exist.', queue)
        return ret
    out = _run_aws('receive-message', region, opts, user, queue=url_map[queue],
                   num=num)
    # BUG FIX: when the queue is empty the AWS CLI prints nothing and
    # _run_aws returns '' -- indexing out['Messages'] then raised
    # TypeError. Guard and use .get() so an empty queue yields
    # {'Messages': None} instead of a crash.
    if out:
        ret['Messages'] = out.get('Messages')
    return ret
def delete_message(queue, region, receipthandle, opts=None, user=None):
    '''
    Delete one or more messages from a queue in a region

    queue
        The name of the queue to delete messages from

    region
        Region where SQS queues exists

    receipthandle
        The ReceiptHandle of the message to delete. The ReceiptHandle
        is obtained in the return from receive_message

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'

    .. versionadded:: 2014.7.0
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    if queue not in url_map:
        log.info('"%s" queue does not exist.', queue)
        return False
    # delete-message prints nothing on success; the call is best-effort
    # and its (empty) result is intentionally not inspected, so don't
    # bind it to an unused local.
    _run_aws('delete-message', region, opts, user,
             receipthandle=receipthandle, queue=url_map[queue],)
    return True
def list_queues(region, opts=None, user=None):
    '''
    List the queues in the selected region.

    region
        Region to list SQS queues for

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.list_queues <region>
    '''
    listing = _run_aws('list-queues', region, opts, user)
    return {
        'retcode': 0,
        'stdout': listing['QueueUrls'],
    }
def create_queue(name, region, opts=None, user=None):
    '''
    Creates a queue with the correct name.

    name
        Name of the SQS queue to create

    region
        Region to create the SQS queue in

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.create_queue <sqs queue> <region>
    '''
    # 'queue-name' is not a valid python identifier, so pass it via **.
    result = _run_aws(
        'create-queue', region=region, opts=opts,
        user=user, **{'queue-name': name})
    return {
        'retcode': 0,
        'stdout': result['QueueUrl'],
        'stderr': '',
    }
def delete_queue(name, region, opts=None, user=None):
    '''
    Deletes a queue in the region.

    name
        Name of the SQS queue to delete

    region
        Name of the region to delete the queue from

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.delete_queue <sqs queue> <region>
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    log.debug('map %s', url_map)
    if name in url_map:
        delete = {'queue-url': url_map[name]}
        # delete-queue prints nothing on success; the result is not
        # inspected, so don't bind it to an unused local.
        _run_aws(
            'delete-queue',
            region=region,
            opts=opts,
            user=user,
            **delete)
        success = True
        err = ''
        out = '{0} deleted'.format(name)
    else:
        out = ''
        err = "Delete failed"
        success = False
    ret = {
        'retcode': 0 if success else 1,
        'stdout': out,
        'stderr': err,
    }
    return ret
def queue_exists(name, region, opts=None, user=None):
    '''
    Returns True or False on whether the queue exists in the region

    name
        Name of the SQS queue to search for

    region
        Name of the region to search for the queue in

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

        salt '*' aws_sqs.queue_exists <sqs queue> <region>
    '''
    listing = list_queues(region, opts, user)
    url_map = _parse_queue_list(listing)
    return name in url_map
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_get_session
|
python
|
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
|
Get a connection to the XenServer host
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L131-L181
|
[
"def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):\n '''\n Search and return a setting in a known order:\n\n 1. In the virtual machine's configuration\n 2. In the virtual machine's profile configuration\n 3. In the virtual machine's provider configuration\n 4. In the salt cloud configuration if global searching is enabled\n 5. Return the provided default\n '''\n\n # As a last resort, return the default\n value = default\n\n if search_global is True and opts.get(name, None) is not None:\n # The setting name exists in the cloud(global) configuration\n value = deepcopy(opts[name])\n\n if vm_ and name:\n # Let's get the value from the profile, if present\n if 'profile' in vm_ and vm_['profile'] is not None:\n if name in opts['profiles'][vm_['profile']]:\n if isinstance(value, dict):\n value.update(opts['profiles'][vm_['profile']][name].copy())\n else:\n value = deepcopy(opts['profiles'][vm_['profile']][name])\n\n # Let's get the value from the provider, if present.\n if ':' in vm_['driver']:\n # The provider is defined as <provider-alias>:<driver-name>\n alias, driver = vm_['driver'].split(':')\n if alias in opts['providers'] and \\\n driver in opts['providers'][alias]:\n details = opts['providers'][alias][driver]\n if name in details:\n if isinstance(value, dict):\n value.update(details[name].copy())\n else:\n value = deepcopy(details[name])\n elif len(opts['providers'].get(vm_['driver'], ())) > 1:\n # The provider is NOT defined as <provider-alias>:<driver-name>\n # and there's more than one entry under the alias.\n # WARN the user!!!!\n log.error(\n \"The '%s' cloud provider definition has more than one \"\n 'entry. Your VM configuration should be specifying the '\n \"provider as 'driver: %s:<driver-engine>'. 
Since \"\n \"it's not, we're returning the first definition which \"\n 'might not be what you intended.',\n vm_['driver'], vm_['driver']\n )\n\n if vm_['driver'] in opts['providers']:\n # There's only one driver defined for this provider. This is safe.\n alias_defs = opts['providers'].get(vm_['driver'])\n provider_driver_defs = alias_defs[next(iter(list(alias_defs.keys())))]\n if name in provider_driver_defs:\n # The setting name exists in the VM's provider configuration.\n # Return it!\n if isinstance(value, dict):\n value.update(provider_driver_defs[name].copy())\n else:\n value = deepcopy(provider_driver_defs[name])\n\n if name and vm_ and name in vm_:\n # The setting name exists in VM configuration.\n if isinstance(vm_[name], types.GeneratorType):\n value = next(vm_[name], '')\n else:\n if isinstance(value, dict) and isinstance(vm_[name], dict):\n value.update(vm_[name].copy())\n else:\n value = deepcopy(vm_[name])\n\n return value\n",
"def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('url',)\n )\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI keyword values arrive as strings, so compare against 'True'.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # BUG FIX: the key was misspelled 'OpqueRef' here while the
            # non-terse branch uses 'OpaqueRef' -- use the correct
            # spelling in both branches.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call != 'action':
        return pool_list()
    raise SaltCloudException(
        'The avail_locations function must be called with -f or --function.'
    )
def avail_sizes(session=None, call=None):
    '''
    Report that VM sizes are defined by templates, not listed separately.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # BUG FIX: error message previously read 'show_instnce'
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # BUG FIX: initialize ret so a template/control-domain record does not
    # leave it undefined when it is passed to cache_node below.
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            # the record's other_config has no base_template_name entry
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
    __utils__['cloud.cache_node'](
        ret,
        __active_provider_name__,
        __opts__
    )
    return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for a VM to report an IP address during create().

    Polls ``get_vm_ip`` every 5 seconds, ignoring APIPA (169.x)
    self-assigned addresses, and gives up after ~180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None and status.startswith('169'):
            # ignore APIPA address -- keep polling
            status = None
        if status is not None:
            # BUG FIX: a usable address was found; previously the loop
            # still slept a final 5 seconds before exiting.
            break
        delta = datetime.now() - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        # use total_seconds(): timedelta.seconds wraps at one day
        if delta.total_seconds() > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # BUG FIX: error message previously named show_instnce
        raise SaltCloudException(
            'The start function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # third arg True = start paused? No -- (start_paused=False, force=True)
    # per XenAPI VM.start signature; confirm against XenAPI docs.
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Positional args: start_paused=False, force=True
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    Alias for :func:`shutdown`.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading log message (was 'Starting VM %s').
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    Performs a clean reboot when the VM is running; otherwise returns a
    message string explaining the VM is not running.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading log message (was 'Starting VM %s').
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Hard-shuts-down the VM if needed, destroys its non-ISO VDIs, then
    destroys the VM itself, firing salt-cloud events before and after
    and cleaning the minion cache entries.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Fire the pre-destroy event so reactors can observe the workflow.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # Force power-off first; a running VM cannot be destroyed.
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # Destroy the disks (VDIs) reachable via the VM's VBDs.
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    repos = {}
    for sr_ref in session.xenapi.SR.get_all():
        rec = session.xenapi.SR.get_record(sr_ref)
        repos[rec['name_label']] = rec
    return repos
def host_list(call=None):
    '''
    Get a list of Xen Servers

    Returns a dict of host records keyed by host name label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    # NOTE(review): this guard only rejects call == 'action', while
    # sibling functions (sr_list, pif_list) reject call != 'function'.
    # Confirm which convention is intended before unifying.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        ret[host_record['name_label']] = host_record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = {}
    for pool_ref in session.xenapi.pool.get_all():
        rec = session.xenapi.pool.get_record(pool_ref)
        records[rec['name_label']] = rec
    return records
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs) on the pool,
    keyed by PIF uuid.

    (Docstring fixed: it previously described pool_list by mistake.)

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vif definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :return: ``{name: {'vif-0': record, ...}}`` or an error string when
        no name is given.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual x counter.
        for idx, vif in enumerate(vifs):
            data['vif-{}'.format(idx)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :return: ``{'vbd-0': record, ...}`` (empty dict when the name does
        not resolve to exactly one VM), or an error string when no name
        is given.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual x counter.
            for idx, vbd in enumerate(vbds):
                data['vbd-{}'.format(idx)] = session.xenapi.VBD.get_record(vbd)
        ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    Images in this driver are Xen templates, so this simply delegates
    to :func:`template_list`.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO VDIs (virtual disks) attached to a VM.

    Walks the VM's VBDs, destroys each backing VDI whose name label
    does not contain 'iso', and returns the destroyed labels keyed
    ``vdi-N``.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read the virtual block devices (VBDs) attached to the VM
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Skip empty drives (no VDI attached).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read the VDI backing this VBD
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Skip ISO images (e.g. attached installer media).
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                        x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :return: ``{name: {'status': 'destroyed'|'not found'}}``
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # The positional ``name`` is overwritten; the template name comes
    # from kwargs (as passed on the salt-cloud -f command line).
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    :return: the PV_args string, or None when it is empty/unset.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    args = session.xenapi.VM.get_PV_args(vm_ref)
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :return: True on success, False when pv_args is missing or the
        XenAPI call fails (failures are logged, not raised).
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except (KeyError, TypeError):
        # KeyError: kwargs lacks 'pv_args'; TypeError: kwargs is None
        # (previously an uncaught crash when no kwargs were passed).
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
list_nodes
|
python
|
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
|
List virtual machines
.. code-block:: bash
salt-cloud -Q
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L184-L214
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def get_vm_ip(name=None, session=None, call=None):\n '''\n Get the IP address of the VM\n\n .. code-block:: bash\n\n salt-cloud -a get_vm_ip xenvm01\n\n .. note:: Requires xen guest tools to be installed in VM\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'This function must be called with -a or --action.'\n )\n if session is None:\n log.debug('New session being created')\n session = _get_session()\n vm = _get_vm(name, session=session)\n ret = None\n # -- try to get ip from vif\n vifs = session.xenapi.VM.get_VIFs(vm)\n if vifs is not None:\n for vif in vifs:\n if session.xenapi.VIF.get_ipv4_addresses(vif):\n cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()\n ret, subnet = cidr.split('/')\n log.debug(\n 'VM vif returned for instance: %s ip: %s', name, ret)\n return ret\n # -- try to get ip from get tools metrics\n vgm = session.xenapi.VM.get_guest_metrics(vm)\n try:\n net = session.xenapi.VM_guest_metrics.get_networks(vgm)\n if \"0/ip\" in net.keys():\n log.debug(\n 'VM guest metrics returned for instance: %s 0/ip: %s',\n name, net[\"0/ip\"]\n )\n ret = net[\"0/ip\"]\n # except Exception as ex:\n except XenAPI.Failure:\n log.info('Could not get vm metrics at this time')\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.cache
import salt.config as config
from salt.ext import six
from salt.exceptions import (
    SaltCloudSystemExit,
    SaltCloudException
)
# Import Salt-Cloud Libs
import salt.utils.cloud
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): salt.cache is used here but this module does not
    # import it explicitly; it works only if another salt import already
    # loaded the submodule. An explicit ``import salt.cache`` is safer.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)  # 'url' is the only required provider key
    )
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads url/user/password/ignore_ssl from the provider configuration
    and logs in. If the first login raises an ``XenAPI.Failure`` the
    code assumes the target is a pool slave and retries against the
    pool master address carried in the failure details.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # NOTE(review): assumes the failure is HOST_IS_SLAVE, where
        # details[1] holds the pool master's address; other failures
        # would build a nonsense retry URL. Confirm upstream intent.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    Returns the first IPv4 address found on a VIF, falls back to the
    guest-metrics "0/ip" entry, and returns None when neither yields
    an address.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses are CIDR strings; strip the prefix length.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    NOTE(review): despite the TODO below about "first interface", the
    loop applies the same static IPv4 config to EVERY VIF on the VM,
    and the function always returns True (failures are only logged).
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    Returns every non-template, non-dom0 VM's full record (augmented
    with id/name/image/size/state/ips) keyed by name label, and caches
    the result via cloud.cache_node_list.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0).
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # NOTE(review): snapshot_time is dropped — presumably it is
            # not serializable for the node cache; confirm.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    fields = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, fields, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    # Only the literal string 'True' (as passed on the CLI) enables
    # terse mode; anything else — including a missing kwarg — does not.
    terse = kwargs is not None and kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Fixed key spelling: was 'OpqueRef', now consistent with
            # the non-terse branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    Currently delegates to :func:`pool_list` — resource pools are the
    closest analogue to "locations" in Xen.

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    Sizes have no independent meaning in Xen; they come from the
    template, so this returns a status message pointing at
    ``--list-images``.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Fixed typo in the status message: 'build' -> 'built'.
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    Lists every VM record flagged ``is_a_template``, keyed by its name
    label, so callers can inspect core counts, memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    records = (
        session.xenapi.VM.get_record(vm)
        for vm in session.xenapi.VM.get_all()
    )
    return {
        rec['name_label']: rec
        for rec in records
        if rec['is_a_template']
    }
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :return: summary dict for a regular VM; an empty dict when ``name``
        resolves to a template or the control domain.
    '''
    if call == 'function':
        # Fixed typo: was 'show_instnce'.
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Initialize ret so templates/dom0 no longer hit UnboundLocalError
    # at the final return.
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        # Older templates may lack the base_template_name key.
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` key when present; otherwise
    falls back to the first pool on the host, or None when there is no
    pool at all.
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Prefers the profile's ``storage_repo`` key; otherwise uses the
    resource pool's default SR, or None when neither is available.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    Workflow: fire 'creating' event -> clone or copy the template ->
    provision -> start -> wait for an IP -> optionally set a static IP
    -> optionally bootstrap the Salt minion -> fire 'created' event.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM
    image = vm_.get('image')
    # Clone by default; profiles set clone: False to force a full copy.
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Fills in the bootstrap connection values on ``vm_`` (mutating it)
    and runs cloud.bootstrap over SSH using the guest-tools IP.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    # NOTE(review): weak hard-coded fallback password; profiles should
    # always set 'password' explicitly.
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined

    Reads the optional ``ipv4_cidr`` / ``ipv4_gw`` profile keys; the
    static assignment is only attempted when ``ipv4_cidr`` is present.
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
        log.debug('attempting to set IP in instance')
        set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip every 5 seconds for up to ~180 seconds; link-local
    (APIPA, 169.x) addresses are treated as "no address yet".
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository

    :param image: name label of the source template
    :param name: name label for the new VM
    :param session: optional existing XenAPI session
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source = _get_vm(image, session)
    task = session.xenapi.Async.VM.clone(source, name)
    _run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    :param template: object reference of the source template
    :param name: string name of new VM
    :param session: object reference (optional XenAPI session)
    :param sr: object reference of the target storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source = _get_vm(template, session)
    task = session.xenapi.Async.VM.copy(source, name, sr)
    _run_async_task(task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy

    Turns the cloned/copied template into a real (startable) VM.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.provision(vm)
    _run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Positional args: start_paused=False, force=True
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`)

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fix: log message previously said 'Starting VM' (copy-paste bug).
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    Only a VM in the ``Running`` power state is rebooted; otherwise a
    message string is returned.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fix: error message previously referenced 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fix: log message previously said 'Starting VM' (copy-paste bug).
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Return the XenAPI opaque reference for the VM with the given name
    label, or ``None`` when the name is missing or ambiguous.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Return the XenAPI opaque reference for the storage repository with
    the given name label, or ``None`` when missing or ambiguous.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Return the XenAPI opaque reference of the first resource pool whose
    name label contains ``name``, or ``None`` when no pool matches.
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        pool_record = session.xenapi.pool.get_record(pool_ref)
        # substring match, same as the original behavior
        if name in pool_record.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Hard-shuts the VM down if it is not Halted, destroys its non-ISO
    VDIs via destroy_vm_vdis(), destroys the VM itself and clears the
    salt-cloud cache entries.  Returns None when no VM matches ``name``.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard) so the disks can be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop salt-cloud's cached record of the minion
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by name label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.SR.get_record(sr)
               for sr in session.xenapi.SR.get_all())
    return {rec['name_label']: rec for rec in records}
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by name label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.host.get_record(host)
               for host in session.xenapi.host.get_all())
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by name label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.pool.get_record(pool)
               for pool in session.xenapi.pool.get_all())
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.PIF.get_record(pif)
               for pif in session.xenapi.PIF.get_all())
    return {rec['uuid']: rec for rec in records}
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fix: typo 'rquired' -> 'required' in the returned message
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual 'x' counter
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    return {name: data}
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fix: typo 'rquired' -> 'required' in the returned message
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed on an unambiguous match
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            # enumerate() replaces the manual 'x' counter
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = \
                    session.xenapi.VBD.get_record(vbd)
    return data
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # In Xen, templates double as images, so simply delegate.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO virtual disk images (VDIs) attached to a VM.

    Walks the VM's virtual block devices (VBDs) and destroys each
    referenced VDI unless its name label contains 'iso'.  Returns a
    dict mapping 'vdi-N' keys to destroyed VDI name labels.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # skip empty VBDs (e.g. a CD drive with no disc)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # keep ISO images; destroy everything else
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                # NOTE(review): counter placement reconstructed from
                # upstream source; indentation was mangled -- confirm.
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen template instance(s) matching ``name``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Fix: previously kwargs.get('name', None) discarded the positional
    # ``name`` argument whenever kwargs lacked a 'name' key; now the
    # positional value is the fallback.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # destroy every template whose name label matches
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    args = session.xenapi.VM.get_PV_args(vm_ref)
    # normalize empty/falsy result to None, as before
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    Returns True on success, False when no ``pv_args`` kwarg was passed
    or when the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        new_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', new_args)
    try:
        session.xenapi.VM.set_PV_args(vm_ref, str(new_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
get_vm_ip
|
python
|
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
|
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L217-L260
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): salt.cache is not imported directly in this module;
    # this works only if salt.cache is reachable through another import
    # (e.g. salt.utils.cloud) -- confirm.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, ('url',))
def _get_session():
    '''
    Get an authenticated XenAPI session to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  If the login fails because the target is a
    pool slave, the pool master's address is taken from the failure
    details and the login is retried against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # assumes details[1] holds the pool master address when the
        # configured host is a slave -- TODO confirm error shape
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    Templates and control domains (dom0) are excluded.  Returns a dict
    keyed by VM name label with id/image/name/size/state/ips fields.

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # older templates may lack the base_template_name key
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set a static IP address on every virtual interface (vif) of a VM
    via XenAPI's ``VIF.configure_ipv4``.  Always returns True; per-vif
    failures are only logged.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    Like list_nodes() but returns the complete VM record plus the
    salt-cloud standard fields, and refreshes the node-list cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a XenAPI DateTime that does not serialize
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # refresh salt-cloud's cached node list for this provider
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # Delegate field filtering to the generic salt-cloud helper.
    full_nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(full_nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    # terse is parsed from the string 'True' (CLI kwargs are strings)
    if kwargs is not None:
        if 'terse' in kwargs:
            if kwargs['terse'] == 'True':
                terse = True
            else:
                terse = False
        else:
            terse = False
    else:
        kwargs = {}
        terse = False
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # NOTE(review): key is spelled 'OpqueRef' here but
            # 'OpaqueRef' in the non-terse branch -- likely a typo, but
            # callers may depend on it, so left unchanged.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest analogue to locations for now.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Sizes are a property of templates in Xen, so point users there.
    status_msg = (
        'Sizes are build into templates. '
        'Consider running --list-images to see sizes')
    return {'STATUS': status_msg}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    records = (session.xenapi.VM.get_record(vm)
               for vm in session.xenapi.VM.get_all())
    return {rec['name_label']: rec for rec in records
            if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    Returns the salt-cloud standard node dict and caches it; returns
    None (implicitly) for templates and control domains.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # NOTE(review): 'instnce' typo in this message (left unchanged
        # here; this is a doc-only edit)
        raise SaltCloudException(
            'The show_instnce function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        # older templates may lack the base_template_name key
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` when given, otherwise falls
    back to the first pool on the host (or None when there is none).
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        # explicit pool requested by the profile
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            # default to the first pool on the host
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
    # NOTE(review): get_record on a None resource_pool would fail;
    # presumably a pool always exists in practice -- confirm.
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` when given, otherwise the
    pool's default SR, otherwise None.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        # explicit SR requested by the profile
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            # fall back to the pool's default SR
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.
    Clones (default) or copies the template, provisions and starts the
    VM, waits for an IP, optionally sets a static IP and deploys a
    Salt minion, then fires the salt-cloud lifecycle events.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template (fast) or copying (cross-SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto a freshly created VM (used by create()).
    '''
    # Fill in the bootstrap parameters, falling back to driver defaults.
    ssh_host = get_vm_ip(name, session)
    vm_['ssh_host'] = ssh_host
    for key, default in (('user', 'root'),
                         ('password', 'p@ssw0rd!'),
                         ('provider', 'xen')):
        vm_[key] = vm_.get(key, default)
    log.debug('%s has IP of %s', name, ssh_host)
    # Bootstrap Salt minion!
    if ssh_host is not None:
        log.info('Installing Salt minion on %s', name)
        bootstrap_result = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', bootstrap_result)
def _set_static_ip(name, session, vm_):
    '''
    Assign a static IP during create() when the profile defines one.
    '''
    gateway = ''
    cidr = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        gateway = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, cidr, gateway, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds for up to ~180 seconds,
    discarding link-local (169.x) APIPA addresses.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        # give up after about three minutes
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Fires the standard Salt Cloud destroying/destroyed events, hard-stops
    the VM if it is running, deletes its non-ISO VDIs, destroys the VM
    record, and finally purges the minion cache entries.

    name
        Name label of the VM to destroy.
    call
        Salt Cloud dispatch mode; must NOT be ``'function'`` (this is an
        action, invoked with ``-d``/``--destroy`` or ``-a``).

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce destruction on the Salt event bus before doing anything.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # Hard shutdown first — a VM cannot be destroyed while running.
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # Destroy attached disks (VDIs) discovered via the VM's VBDs,
        # otherwise they would be orphaned in the storage repository.
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Keep the salt-cloud node cache consistent with reality.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    # Map every SR's name_label to its full record.
    for repo in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(repo)
        ret[record['name_label']] = record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # Map every host's name_label to its full record.
    ret = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        ret[record['name_label']] = record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # Map every pool's name_label to its full record.
    ret = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        ret[record['name_label']] = record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs)

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # PIFs are keyed by their uuid rather than a name label.
    ret = {}
    for pif_ref in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif_ref)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    name
        Name label of the VM whose VIFs should be listed.

    Returns a dict ``{name: {'vif-0': record, ...}}``.

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the user-facing message ('rquired' -> 'required').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        x = 0
        for vif in vifs:
            vif_record = session.xenapi.VIF.get_record(vif)
            data['vif-{}'.format(x)] = vif_record
            x += 1
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM

    **requires**: the name of the vm with the vbd definition

    name
        Name label of the VM whose VBDs should be listed.

    Returns a dict ``{'vbd-0': record, ...}`` (empty if the VM is not
    found or has no VBDs).

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the user-facing message ('rquired' -> 'required').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed on an unambiguous match.
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                data['vbd-{}'.format(x)] = vbd_record
                x += 1
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # In Xen, templates serve as the "images"; delegate to template_list.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM

    Walks the VM's virtual block devices (VBDs), resolves each attached
    VDI, and destroys it unless it looks like a mounted ISO.

    Returns a dict mapping ``vdi-N`` to the destroyed VDI's name label.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # Resolve the VM; only proceed on an unambiguous match.
    matches = session.xenapi.VM.get_by_name_label(name)
    if len(matches) == 1:
        vbds = session.xenapi.VM.get_VBDs(matches[0])
        if vbds is not None:
            count = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                vdi_ref = vbd_record['VDI']
                if vdi_ref != 'OpaqueRef:NULL':
                    vdi_record = session.xenapi.VDI.get_record(vdi_ref)
                    # Skip ISO images (e.g. attached install media).
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vdi_ref)
                        ret['vdi-{}'.format(count)] = vdi_record['name_label']
                        count += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name

    name
        Name label of the template. May be given positionally or via
        ``name=`` on the command line; previously the positional argument
        was silently discarded in favor of ``kwargs['name']``.

    Returns ``{name: {'status': 'destroyed'}}`` or
    ``{name: {'status': 'not found'}}``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Prefer the kwarg, but fall back to the positional argument instead
    # of unconditionally overwriting it with None.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only templates are eligible; real VMs are never destroyed here.
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    Returns the VM's PV_args string, or ``None`` when it is empty.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # An empty PV_args string is reported as None.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    Expects ``kwargs['pv_args']``; returns ``True`` on success, ``False``
    when the parameter is missing or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm_ref, str(kwargs['pv_args']))
    except KeyError:
        # kwargs was None or did not carry 'pv_args'.
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
set_vm_ip
|
python
|
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
|
Set the IP address on a virtual interface (vif)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L263-L302
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.

    Returns the driver name on success, ``False`` when the provider is
    not configured or the XenAPI dependency is missing.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # Initialize the module-level salt cache once the driver is accepted.
    # NOTE(review): `salt.cache` is not imported in this file's visible
    # import block (only salt.config / salt.utils.cloud are) — confirm the
    # import exists elsewhere or this line raises AttributeError.
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module via the standard salt-cloud
    driver-dependency helper.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.

    A provider entry is considered configured when it supplies a ``url``.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(
        __opts__,
        provider_name,
        ('url',)
    )
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads url/user/password/ignore_ssl from the provider configuration and
    logs in via XenAPI.  If the login fails because the target is a pool
    slave, XenAPI raises a Failure whose details carry the pool master's
    address; the handler retries the login against that master.

    Returns an authenticated ``XenAPI.Session``.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        # Password deliberately redacted from the debug log.
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Pool-slave redirect: details[1] holds the master's address.
        # NOTE(review): assumes this Failure is HOST_IS_SLAVE — other
        # failures (bad credentials, unreachable host) would also land
        # here and produce a confusing retry. Confirm.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        # Rebuild the URL as scheme://master-address.
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    ret = {}
    for vm_ref in session.xenapi.VM.get_all_records():
        record = session.xenapi.VM.get_record(vm_ref)
        # Skip templates and dom0; only report real guests.
        if record['is_a_template'] or record['is_control_domain']:
            continue
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        label = record['name_label']
        ret[label] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': label,
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(label, session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    First tries the statically configured IPv4 addresses on the VM's VIFs;
    if none are set, falls back to the guest-tools metrics network map.

    Returns the IP address string, or ``None`` if no address is known.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                # Addresses come back in CIDR notation; strip the prefix.
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # '0/ip' is the first interface's IPv4 address in the metrics map.
        if "0/ip" in net:
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        # Metrics may be unavailable until the guest tools have reported.
        log.info('Could not get vm metrics at this time')
    return ret
def list_nodes_full(session=None):
    '''
    List full virtual machines

    Returns a dict of VM name -> full XenAPI record augmented with the
    standard salt-cloud fields (id, name, image, size, state, ips), and
    refreshes the salt-cloud node cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # Reuse the record already fetched above — the original code
            # issued a second, redundant VM.get_record() round-trip here.
            vm_cfg = record
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a XenAPI DateTime that does not serialize.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # Delegate to the generic salt-cloud field-selection helper over the
    # full node listing.
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against the literal 'True'.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                # Fixed key spelling: was 'OpqueRef', inconsistent with the
                # 'OpaqueRef' key used in the non-terse branch below.
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest analogue to "locations" for now.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Xen has no separate size concept; sizes are baked into templates.
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    # Collect only VM records flagged as templates.
    for vm_ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm_ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    name
        Name label of the VM.

    Returns the standard salt-cloud node dict and refreshes the node
    cache entry.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Fixed copy/paste typo in the message ('show_instnce').
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` if given; otherwise falls back
    to the first pool on the host (or ``None`` when none exist).
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        all_pools = session.xenapi.pool.get_all()
        resource_pool = all_pools[0] if all_pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` if given; otherwise falls back to
    the resource pool's default SR, or ``None`` when neither is available.
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    Workflow: fire 'creating' event -> record in cachedir index -> pick
    resource pool and storage repo -> clone or copy the template -> provision
    -> start -> wait for an IP -> optionally set a static IP -> optionally
    bootstrap the Salt minion -> fire 'created' event.

    vm_
        The profile/VM definition dict supplied by salt-cloud.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        # Default to cloning (fast, same-SR) when the profile doesn't say.
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template; copy is the cross-SR alternative
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # (typo 'delopy' kept: log strings are runtime output)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Fills in bootstrap connection defaults on ``vm_`` and hands off to the
    generic salt-cloud bootstrap helper.
    '''
    # Get bootstrap values; fall back to driver defaults where unset.
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined

    Reads ``ipv4_cidr``/``ipv4_gw`` from the profile (empty string when
    absent) and applies them via set_vm_ip().
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds, ignoring APIPA (169.254.0.0/16)
    addresses, and gives up with a warning after 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore link-local APIPA addresses. The previous check
            # matched any address starting with '169', which wrongly
            # rejected valid addresses such as 169.1.x.x.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once per second until it leaves the 'pending' state,
    logging progress, then destroys the task record.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        # Progress is reported by XenAPI as a 0.0-1.0 fraction.
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Task records persist server-side until explicitly destroyed.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository.

    template
        object reference of the source template
    name
        string name of new VM
    session
        XenAPI session object reference
    sr
        storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy

    Turns the cloned/copied template into a startable VM.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Args follow VM.start(vm, start_paused, force) — presumably
    # start_paused=False, force=True; confirm against the XenAPI spec.
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Args mirror VM.start(vm, start_paused, force) — confirm against
    # the XenAPI VM.resume specification.
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    Alias for :func:`shutdown`.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy/pasted message that referred to 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed copy/pasted log message (was 'Starting VM').
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a running VM.

    Returns the instance details on success, or a message string when the
    VM is not in the ``Running`` state (clean_reboot is only valid then).

    .. code-block:: bash

        salt-cloud -a reboot xenvm01

    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (copy-paste typo)
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed: log previously said 'Starting VM' during a reboot
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy a Xen VM or template instance.

    Fires salt-cloud events before and after, hard-shuts-down a running
    VM, destroys its backing disks, then the VM itself, and finally
    purges the minion from salt-cloud's local cache.

    .. code-block:: bash

        salt-cloud -d xenvm01

    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce the destroy on the Salt event bus before doing any work.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down first — presumably VM.destroy requires a halted VM;
        # confirm against the XenAPI reference
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's local cache so later queries
        # do not report the deleted VM
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen

    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.SR.get_record(sr)
        for sr in session.xenapi.SR.get_all()
    )
    # key each SR record by its human-readable name label
    return {record['name_label']: record for record in records}
def host_list(call=None):
    '''
    Get a list of Xen Servers.

    .. code-block:: bash

        salt-cloud -f host_list myxen

    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.host.get_record(host)
        for host in session.xenapi.host.get_all()
    )
    # key each host record by its human-readable name label
    return {record['name_label']: record for record in records}
def pool_list(call=None):
    '''
    Get a list of resource pools.

    .. code-block:: bash

        salt-cloud -f pool_list myxen

    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.pool.get_record(pool)
        for pool in session.xenapi.pool.get_all()
    )
    # key each pool record by its human-readable name label
    return {record['name_label']: record for record in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    '''
    # NOTE: docstring previously said "Resource Pools" / "pool_list" —
    # copy-paste error; this function lists PIFs.
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for pif in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces (VIFs) on a VM.

    **requires**: the name of the VM.

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed user-facing typo: was "rquired"
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate replaces the manual counter used previously
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    return {name: data}
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the VM.

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed user-facing typo: was "rquired"
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed when the name resolves to exactly one VM
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            # enumerate replaces the manual counter used previously
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen

    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are VM templates; delegate to the template listing.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM.

    Walks the VM's virtual block devices (VBDs), destroying each backing
    VDI except empty drives and anything whose name label contains
    ``iso``. Returns a dict of the destroyed VDI name labels.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks an empty drive with no VDI attached
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # skip ISO images (e.g. mounted install media)
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    kwargs
        Must contain ``name``, the template's name label (overrides the
        positional ``name``, preserving the original behavior).

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already returns {opaque_ref: record}; iterate the
    # pairs instead of re-fetching every record with a second API call.
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get the paravirtual (PV) boot arguments for a VM.

    Returns the PV args string, or ``None`` when the VM has none set.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set the paravirtual (PV) boot arguments for a VM.

    kwargs
        Must contain ``pv_args``, the PV boot argument string.

    Returns ``True`` on success, ``False`` when ``pv_args`` is missing
    or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # kwargs did not include 'pv_args'
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        # server rejected the call; treated as a soft failure
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
list_nodes_full
|
python
|
def list_nodes_full(session=None):
    '''
    List full virtual machines.

    Returns a dict keyed by VM name label containing the full XenAPI VM
    record plus salt-cloud's standard fields (id, name, image, size,
    state, private_ips, public_ips), and refreshes the node-list cache.

    .. code-block:: bash

        salt-cloud -F

    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # skip templates and the control domain (dom0)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is dropped — presumably because the XenAPI
            # DateTime value does not serialize cleanly; confirm
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # normalize 'provider:driver' to just the provider name for the cache
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
|
List full virtual machines
.. code-block:: bash
salt-cloud -F
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L305-L351
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def get_vm_ip(name=None, session=None, call=None):\n '''\n Get the IP address of the VM\n\n .. code-block:: bash\n\n salt-cloud -a get_vm_ip xenvm01\n\n .. note:: Requires xen guest tools to be installed in VM\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'This function must be called with -a or --action.'\n )\n if session is None:\n log.debug('New session being created')\n session = _get_session()\n vm = _get_vm(name, session=session)\n ret = None\n # -- try to get ip from vif\n vifs = session.xenapi.VM.get_VIFs(vm)\n if vifs is not None:\n for vif in vifs:\n if session.xenapi.VIF.get_ipv4_addresses(vif):\n cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()\n ret, subnet = cidr.split('/')\n log.debug(\n 'VM vif returned for instance: %s ip: %s', name, ret)\n return ret\n # -- try to get ip from get tools metrics\n vgm = session.xenapi.VM.get_guest_metrics(vm)\n try:\n net = session.xenapi.VM_guest_metrics.get_networks(vgm)\n if \"0/ip\" in net.keys():\n log.debug(\n 'VM guest metrics returned for instance: %s 0/ip: %s',\n name, net[\"0/ip\"]\n )\n ret = net[\"0/ip\"]\n # except Exception as ex:\n except XenAPI.Failure:\n log.info('Could not get vm metrics at this time')\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a placeholder for instance sizes.

    Sizes have no standalone meaning for Xen — they are baked into
    templates — so this returns a status message pointing at
    ``--list-images`` instead.

    .. code-block:: bash

        salt-cloud --list-sizes myxen

    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template, showing number of cores,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen

    '''
    session = _get_session()
    templates = {}
    for vm in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm)
        # only VM records flagged as templates are included
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for a VM to report a usable IP address during create().

    Polls ``get_vm_ip`` every 5 seconds, ignoring link-local/APIPA
    self-assigned addresses, and gives up after 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore APIPA/link-local addresses (169.254.0.0/16 per RFC
            # 3927). The previous '169' prefix test also rejected valid
            # routable 169.x.y.z addresses.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run a XenAPI task in asynchronous mode to prevent timeouts.

    Polls the task status once a second until it leaves the ``pending``
    state, logging progress, then destroys the task record. Returns
    ``None`` immediately when either argument is missing.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # destroy the finished task handle — presumably to free the server-side
    # task record; confirm against the XenAPI task lifecycle docs
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM named ``name`` by cloning the template/VM ``image``.

    Cloning is faster than copying and should be used when source and
    target live in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by full copy.

    Copying is slower than cloning and should be used when source and
    target are NOT in the same storage repository.

    template
        name label of the source template/VM
    name
        name for the new VM
    session
        XenAPI session object reference
    sr
        storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a freshly cloned/copied VM so it becomes a real instance.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a VM.

    .. code-block:: bash

        salt-cloud -a start xenvm01

    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (copy-paste typo)
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # VM.start(start_paused=False, force=True) per the XenAPI signature
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a VM (freeze it in memory).

    .. code-block:: bash

        salt-cloud -a pause xenvm01

    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (copy-paste typo)
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    Unpause a previously paused VM.

    .. code-block:: bash

        salt-cloud -a unpause xenvm01

    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (copy-paste typo)
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.

    Performs a clean reboot; a VM that is not currently running cannot be
    rebooted and a message string is returned instead.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # error previously referenced the wrong action ('show_instnce')
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message previously said 'Starting VM' while rebooting
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Look up a VM by its name label and return its opaque reference.

    Returns ``None`` unless exactly one VM matches *name*.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Look up a storage repository (SR) by name label.

    Returns the SR's opaque reference, or ``None`` unless exactly one
    repository matches *name*.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Find a resource pool whose name label contains *name*.

    Returns the pool's opaque reference, or ``None`` when no pool matches.
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        # substring match, mirroring how profiles reference pools
        if name in record.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    Fires the salt-cloud destroying/destroyed events, force-stops the VM if
    it is running, deletes its non-ISO disks, destroys the VM record, and
    cleans up the salt-cloud minion cache.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # fire the "destroying" event before any destructive work happens
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down: a running VM must be halted before it can be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's cachedir so queries stay accurate
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        # key each repository's full record by its human-readable label
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers.

    Returns a mapping of host name label to the host's full record.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.host.get_record(host_ref)
        for host_ref in session.xenapi.host.get_all()
    )
    return {record['name_label']: record for record in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools.

    Returns a mapping of pool name label to the pool's full record.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        result[record['name_label']] = record
    return result
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs).

    Returns a mapping of PIF uuid to the interface's full record.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    interfaces = {}
    for pif_ref in session.xenapi.PIF.get_all():
        # PIFs are keyed by uuid since labels are not unique
        record = session.xenapi.PIF.get_record(pif_ref)
        interfaces[record['uuid']] = record
    return interfaces
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vif definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # validate before opening a session (fixed typo: was 'rquired')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # index entries as vif-0, vif-1, ... in enumeration order
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # validate before opening a session (fixed typo: was 'rquired')
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed when the name resolves to exactly one VM
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            # index entries as vbd-0, vbd-1, ... in enumeration order
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
    return data
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    When invoked via ``--list-images`` the full template details are
    returned.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are simply the host's templates
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disks (VDIs) attached to a VM.

    ISO images are deliberately skipped so shared install media survive.
    Returns a mapping of ``vdi-N`` to each destroyed disk's name label.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD with no disk attached
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # skip ISOs so shared install media are not deleted
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance.

    The template name is taken from ``kwargs['name']`` (the positional
    *name* argument is overridden).  Only records flagged as templates are
    eligible; real VMs are never destroyed here.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # the template name arrives via kwargs when invoked with -f
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM.

    Returns the PV args string, or ``None`` when the VM has none set.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # normalize an empty value to None for callers
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.

    Returns ``True`` on success, ``False`` when ``pv_args`` is missing from
    *kwargs* or the host rejects the change.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        # XenAPI expects the PV args as a single string
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # caller did not supply pv_args at all
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        # host rejected the change (e.g. not a PV guest)
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
vdi_list
|
python
|
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # the CLI passes terse as the string 'True', not a boolean
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # minimal detail: uuid and opaque reference only
            # (key fixed: was misspelled 'OpqueRef', inconsistent with the
            # 'OpaqueRef' key used in the full-detail branch below)
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
|
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal detail using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L370-L411
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    Tries the VIF's reported IPv4 address first, then falls back to the
    guest-tools metrics.  Returns ``None`` when no address is available.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses come back in CIDR form; keep just the address part
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # '0/ip' is the guest-tools key for the first interface's address
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # metrics are unavailable until the guest tools have started
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Xen has no standalone size concept; sizes are baked into templates, so
    this points the user at ``--list-images`` instead.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # grammar fixed: message previously read 'Sizes are build into'
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() to determine which resource pool to use.

    Prefers the profile's ``resource_pool`` setting; otherwise falls back to
    the first pool the host reports.  Returns the pool's opaque reference,
    or ``None`` when no pool is available.
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # reuse the list we already fetched instead of calling the API twice
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    if resource_pool is not None:
        # only dereference a real pool; get_record(None) would raise
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for the guest tools to report an IP address during create().

    Polls every 5 seconds and gives up after 180 seconds.  Link-local
    (APIPA) addresses are ignored while waiting.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA addresses only: link-local is 169.254.0.0/16;
            # a bare '169' prefix also rejected valid routable 169.x hosts
            if status.startswith('169.254'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    :param name: name label of the VM to destroy
    :param call: salt-cloud invocation mode; must not be ``function``
    :returns: dict with destroyed-VDI info under ``vbd`` and a
        ``destroyed`` flag (returns ``None`` when the VM is not found)
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce the destroy on the salt event bus before touching the VM.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down — a VM must be halted before it can be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from salt-cloud's cache when caching is enabled.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.SR.get_record(sr_ref)
        for sr_ref in session.xenapi.SR.get_all()
    )
    return {record['name_label']: record for record in records}
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen

    :returns: dict mapping each host's name label to its full host record
    '''
    # NOTE(review): this guard only rejects call == 'action'; sr_list uses
    # the stricter ``call != 'function'`` check — confirm which is intended.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        ret[host_record['name_label']] = host_record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.pool.get_record(pool_ref)
        for pool_ref in session.xenapi.pool.get_all()
    )
    return {record['name_label']: record for record in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs)

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    :returns: dict mapping each PIF's UUID to its full PIF record
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :param name: name label of the VM whose VIFs to list
    :returns: ``{name: {'vif-0': record, ...}}`` or an error string when
        ``name`` is missing
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in returned message: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # Index the VIF records as 'vif-0', 'vif-1', ...
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :param name: name label of the VM whose VBDs to list
    :returns: ``{'vbd-0': record, ...}`` or an error string when ``name``
        is missing
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in returned message: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # Index the VBD records as 'vbd-0', 'vbd-1', ...
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Images are Xen templates; delegate to template_list for details.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM and destroy their non-ISO VDIs.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    :returns: dict mapping ``vdi-N`` to the name label of each destroyed
        VDI
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Skip empty drives (no VDI attached).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # ISO images (e.g. attached install media) are spared.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # The target name comes from the kwargs, not the positional argument.
    name = kwargs.get('name', None)
    session = _get_session()
    ret = {}
    found = False
    for vm_ref in session.xenapi.VM.get_all_records():
        record = session.xenapi.VM.get_record(vm_ref)
        # Only templates are eligible; real VMs are never touched here.
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm_ref)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # An empty PV-args string is reported as None.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :returns: True on success; False when ``pv_args`` is missing or the
        XenAPI call fails
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # kwargs was None or did not contain 'pv_args'
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
template_list
|
python
|
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
|
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L447-L466
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): salt.cache is not imported at the top of this file;
    # this only works if another salt import makes the submodule
    # available — confirm and add an explicit ``import salt.cache``.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks that the ``XenAPI`` SDK module could be imported.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.

    A provider is considered configured when it at least supplies the
    ``url`` option; returns False otherwise.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads ``url``/``user``/``password``/``ignore_ssl`` from the provider
    configuration and logs in via XenAPI. If the first login fails —
    presumably because the configured host is a pool slave — the pool
    master's address is taken from the failure details and the login is
    retried against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Failure details are assumed to carry the pool master address at
        # index 1 (HOST_IS_SLAVE) — TODO confirm this covers other
        # XenAPI.Failure causes, which would produce a bogus retry URL.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        # Keep the scheme ('http:' + '') and swap in the master host.
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0); only real guests
        # are listed.
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                # VM was not created from a template (or lacks the marker).
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :returns: the first IPv4 address found, or None when unavailable
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses come back CIDR-formatted; drop the prefix len.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # Guest metrics are absent until the guest tools report in.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param name: name label of the VM
    :param ipv4_cidr: address in CIDR notation, e.g. ``10.0.0.215/24``
    :param ipv4_gw: IPv4 gateway address
    :returns: always True (failures are only logged)
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                # NOTE(review): this applies the same address to every
                # VIF in the loop — confirm single-VIF assumption above.
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0).
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            # Augment the raw record with the salt-cloud standard fields.
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a XenAPI DateTime object that does not
            # serialize cleanly, so it is dropped.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Strip the driver suffix from 'provider:driver' before caching.
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # Delegate to the generic salt-cloud field selector over the full
    # node listing, using the configured query.selection fields.
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True

    :returns: dict mapping each VDI's name label to its record (or, in
        terse mode, just its uuid and opaque reference)
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # kwargs arrive as strings from the CLI, hence the 'True' comparison.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Fixed key name: was misspelled 'OpqueRef', inconsistent
            # with the non-terse branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools stand in for locations for now.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Xen has no standalone size concept; sizes come from templates.
    status_msg = ('Sizes are build into templates. '
                  'Consider running --list-images to see sizes')
    return {'STATUS': status_msg}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :returns: standard salt-cloud node dict (returns ``None`` when the
        record is a template or the control domain)
    '''
    if call == 'function':
        raise SaltCloudException(
            'The show_instnce function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            # VM was not created from a template (or lacks the marker).
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        # Persist the node data into salt-cloud's cache.
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` when set; otherwise falls back
    to the first pool reported by XenAPI (or ``None`` when there is
    none).
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
    # NOTE(review): when resource_pool is None this lookup presumably
    # fails inside XenAPI — confirm the no-pool path is ever exercised.
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` when set; otherwise falls back
    to the resource pool's default SR, or ``None`` when no pool exists.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/VM configuration dict supplied by salt-cloud
    :returns: instance details from ``show_instance`` plus the raw VM
        record under ``extra``
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        # default to cloning (fast path) when the profile doesn't say
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Fills in the bootstrap connection values (ssh host/user/password)
    from the profile, falling back to defaults, then runs the standard
    salt-cloud bootstrap.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Apply a static IPv4 address/gateway during create() when the profile
    defines ``ipv4_cidr``/``ipv4_gw``.
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls ``get_vm_ip`` every 5 seconds until the guest reports a usable
    IPv4 address, giving up with a warning after 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore link-local APIPA addresses. Fixed: the old check was
            # startswith('169'), which also rejected valid routable
            # 169.x.y.z addresses outside 169.254.0.0/16.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once a second until it leaves the 'pending' state,
    then destroys the task record to free it on the server.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        # Progress is reported as a 0..1 fraction by XenAPI.
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM named ``name`` by cloning ``image``.

    Cloning is the fast path but requires source and target to live in
    the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM named ``name`` by copying ``template`` into the storage
    repository ``sr``.

    Copying is slower than cloning but works when source and target are
    NOT in the same storage repository.

    :param template: name label of the source template/VM
    :param name: name for the new VM
    :param session: XenAPI session object reference
    :param sr: storage-repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a freshly cloned/copied VM so it becomes a real instance.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Positional args are presumably start_paused=False, force=True per
    # the XenAPI VM.start signature — confirm against the SDK.
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Positional args are presumably start_paused=False, force=True per
    # the XenAPI VM.resume signature — confirm against the SDK.
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stopping is implemented as a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log text: previously logged 'Starting VM %s'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed error text: previously said misspelled "show_instnce".
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log text: previously logged 'Starting VM %s'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        # clean_reboot requires the guest to be up and agent-responsive
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    :param name: name label of the VM to destroy
    :param call: salt-cloud invocation mode; must not be ``function``
    :returns: dict with destroyed-VDI info under ``vbd`` and a
        ``destroyed`` flag (returns ``None`` when the VM is not found)
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce the destroy on the salt event bus before touching the VM.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down — a VM must be halted before it can be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from salt-cloud's cache when caching is enabled.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by name label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    repos = {}
    for ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(ref)
        repos[record['name_label']] = record
    return repos
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by host name label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    hosts = {}
    for ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(ref)
        hosts[record['name_label']] = record
    return hosts
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by pool name label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    pools = {}
    for ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(ref)
        pools[record['name_label']] = record
    return pools
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs) on the Xen host,
    keyed by PIF uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces (VIFs) on a VM.

    **requires**: the name of the vm with the vif definition

    Returns ``{name: {'vif-0': record, 'vif-1': record, ...}}``.

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message (was 'rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the vm with the vbd definition

    Returns a dict keyed ``vbd-<n>`` of VBD records, or an empty dict
    when the name does not resolve to exactly one VM.

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message (was 'rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    List the Xen templates usable as images.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Images in Xen are simply VM templates, so delegate entirely.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM.

    Walks the VM's virtual block devices (VBDs), resolves each one's
    VDI and destroys it, skipping ISO images.  Returns a dict of the
    destroyed VDI name labels keyed ``vdi-<n>``.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD with no backing VDI
                # (e.g. an empty CD drive) -- nothing to destroy.
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # ISO media are shared, not VM-owned; leave them alone.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen VM template.

    The template name is taken from the ``name`` kwarg; the positional
    ``name`` parameter is overwritten and kept only for interface
    compatibility.

    kwargs
        name - name label of the template to destroy

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    # get_all_records() already maps each VM ref to its full record,
    # so the previous per-VM get_record() round-trip was redundant.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    Returns the PV_args string, or None when it is empty/unset.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    args = session.xenapi.VM.get_PV_args(_get_vm(name, session=session))
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    kwargs
        pv_args - the paravirtualized boot argument string to set

    Returns True on success; False when no ``pv_args`` kwarg was
    supplied or when the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # No pv_args in kwargs (or kwargs is None) -- nothing to set.
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
show_instance
|
python
|
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
|
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L469-L511
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def get_vm_ip(name=None, session=None, call=None):\n '''\n Get the IP address of the VM\n\n .. code-block:: bash\n\n salt-cloud -a get_vm_ip xenvm01\n\n .. note:: Requires xen guest tools to be installed in VM\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'This function must be called with -a or --action.'\n )\n if session is None:\n log.debug('New session being created')\n session = _get_session()\n vm = _get_vm(name, session=session)\n ret = None\n # -- try to get ip from vif\n vifs = session.xenapi.VM.get_VIFs(vm)\n if vifs is not None:\n for vif in vifs:\n if session.xenapi.VIF.get_ipv4_addresses(vif):\n cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()\n ret, subnet = cidr.split('/')\n log.debug(\n 'VM vif returned for instance: %s ip: %s', name, ret)\n return ret\n # -- try to get ip from get tools metrics\n vgm = session.xenapi.VM.get_guest_metrics(vm)\n try:\n net = session.xenapi.VM_guest_metrics.get_networks(vgm)\n if \"0/ip\" in net.keys():\n log.debug(\n 'VM guest metrics returned for instance: %s 0/ip: %s',\n name, net[\"0/ip\"]\n )\n ret = net[\"0/ip\"]\n # except Exception as ex:\n except XenAPI.Failure:\n log.info('Could not get vm metrics at this time')\n return ret\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def _determine_resource_pool(session, vm_):
    '''
    Determine the resource pool for a new VM (helper for create()).

    If the profile defines ``resource_pool`` it is looked up by name;
    otherwise the first pool reported by the host is used, or None
    when the host reports no pools.
    '''
    if 'resource_pool' in vm_:
        return _get_pool(vm_['resource_pool'], session)
    # Fetch the pool list once (previously queried twice).
    pools = session.xenapi.pool.get_all()
    if not pools:
        return None
    resource_pool = pools[0]
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Error message fixed: it previously named the wrong function
        # ('show_instnce', a copy-paste typo).
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Error message fixed: it previously named the wrong function
        # ('show_instnce', a copy-paste typo).
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stopping is implemented as a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    # An empty PV-args string is reported as None.
    args = session.xenapi.VM.get_PV_args(vm_ref)
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    # A missing pv_args key aborts before any XenAPI call is made.
    try:
        new_args = str(kwargs['pv_args'])
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', kwargs['pv_args'])
    try:
        session.xenapi.VM.set_PV_args(vm_ref, new_args)
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_determine_resource_pool
|
python
|
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    If the profile names a ``resource_pool``, resolve it by label;
    otherwise fall back to the first pool on the host (or ``None`` when
    there are no pools).

    :param session: authenticated XenAPI session
    :param vm_: cloud profile/VM definition dict
    :return: opaque pool reference, or None when none could be found
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # Reuse the single get_all() result instead of issuing the
        # XML-RPC call twice.
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    # Guard the debug lookup: get_record(None) would raise when no pool
    # (or no matching pool) exists.
    if resource_pool:
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
|
Called by create() used to determine resource pool
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L514-L530
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    configured provider.  If the initial login fails, the address found in
    the XenAPI failure details (named here as the pool master) is used to
    rebuild the URL and the login is retried.

    :return: an authenticated ``XenAPI.Session``
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        # Password is deliberately redacted from the debug output.
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # The failure details carry an alternate host address; rebuild
        # the URL (scheme + that address) and retry the login there.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # NOTE(review): the retry does not pass ignore_ssl — confirm
        # whether that is intentional.
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    :return: dict keyed by VM label with id/image/name/size/state/ips
    '''
    session = _get_session()
    ret = {}
    # get_all_records() already returns {ref: record} for every VM, so
    # a second per-VM get_record() round trip is unnecessary.
    for record in session.xenapi.VM.get_all_records().values():
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # Not every VM carries the base_template_name marker; treat a
        # missing key as "unknown image" instead of catching Exception.
        base_template_name = record['other_config'].get('base_template_name')
        if base_template_name is None:
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret[record['name_label']] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': record['name_label'],
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(record['name_label'], session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :param name: label of the VM to query
    :param session: optional existing XenAPI session
    :raises SaltCloudException: when invoked as a --function
    :return: IPv4 address string, or None when no address is available
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            # Fetch the address list once instead of querying it twice.
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net:
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        # Metrics are unavailable until the guest tools report in.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # TODO: for now we take every vif in turn; additional consideration
    # is needed for VMs with multiple interfaces (vifs).
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif_ref in vifs:
            vif_record = session.xenapi.VIF.get_record(vif_ref)
            log.debug(vif_record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif_ref, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F

    :param session: optional existing XenAPI session
    :return: dict keyed by VM label with the full VM record plus
        salt-cloud fields (id/name/image/size/state/ips)
    '''
    if session is None:
        session = _get_session()
    ret = {}
    for vm in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # deal with cases where the VM doesn't have 'base_template_name'
        base_template_name = record['other_config'].get('base_template_name')
        if base_template_name is None:
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        # Reuse the record already fetched above instead of issuing a
        # second get_record() round trip per VM.
        vm_cfg = record
        vm_cfg['id'] = record['uuid']
        vm_cfg['name'] = record['name_label']
        vm_cfg['image'] = base_template_name
        vm_cfg['size'] = None
        vm_cfg['state'] = record['power_state']
        vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
        vm_cfg['public_ips'] = None
        # snapshot_time is dropped from the returned data, as before.
        vm_cfg.pop('snapshot_time', None)
        ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        provider = provider.split(':')[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # Delegate field selection to the shared salt-cloud helper.
    selection = __opts__['query.selection']
    nodes = list_nodes_full()
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so only the literal 'True' enables
    # terse output (same rule as before, without the nested branches).
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Key fixed from the misspelled 'OpqueRef' so both branches
            # expose the reference under the same 'OpaqueRef' key.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    Currently proxies to pool_list(), treating resource pools as
    locations.

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    Sizes are not a separate concept in Xen; they are baked into the
    templates, so this only returns a pointer to ``--list-images``.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    message = (
        'Sizes are build into templates. Consider running '
        '--list-images to see sizes'
    )
    return {'STATUS': message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    # Templates are VMs whose is_a_template flag is set.
    for vm_ref in session.xenapi.VM.get_all():
        rec = session.xenapi.VM.get_record(vm_ref)
        if rec['is_a_template']:
            templates[rec['name_label']] = rec
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :param name: label of the VM to inspect
    :param session: optional existing XenAPI session
    :return: dict with id/image/name/size/state/ips, or None for
        templates and control domains
    '''
    if call == 'function':
        # Fixed 'show_instnce' typo in the error message.
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        # Older templates may lack the base_template_name marker; a
        # missing key just means "unknown image".
        base_template_name = record['other_config'].get('base_template_name')
        if base_template_name is None:
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create
    '''
    if 'storage_repo' in vm_:
        # Profile names a repo explicitly; resolve it by label.
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        # Fall back to the pool's default SR.
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/VM definition dict supplied by salt-cloud
    :return: show_instance() output for the new VM plus an 'extra' key
        holding the raw XenAPI VM record
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo (only used for the copy path below)
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM; cloning is the default when the profile does not say
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template (fast path) or by copy across SRs
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # Fixed 'delopy' typo in the log message.
    log.debug('deploy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()
    '''
    # Fill in bootstrap connection defaults.
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_.setdefault('user', 'root')
    vm_.setdefault('password', 'p@ssw0rd!')
    vm_.setdefault('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined
    '''
    # Missing profile keys fall back to empty strings, as before.
    ipv4_gw = vm_.get('ipv4_gw', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    ipv4_cidr = vm_.get('ipv4_cidr', '')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session, timeout=180):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds, ignoring APIPA (169.x) addresses,
    until an address appears or *timeout* seconds elapse.

    :param name: VM name label
    :param session: XenAPI session
    :param timeout: seconds to wait before giving up; defaults to 180,
        the previously hard-coded limit, so existing callers behave
        the same
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > timeout:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    # Poll until the task leaves the 'pending' state.
    while True:
        if session.xenapi.task.get_status(task) != 'pending':
            break
        pct_done = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', pct_done)
        time.sleep(1)
    # Tasks must be destroyed explicitly once finished.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Async call + task polling avoids client-side timeouts on slow boots.
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stopping is implemented as a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message (previously said 'Starting VM').
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that named show_instance.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message (previously said 'Starting VM').
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    # clean_reboot only works on a running VM.
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Get XEN vm instance object reference
    '''
    if session is None:
        session = _get_session()
    # Only an unambiguous (single) label match is returned.
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Get XEN sr (storage repo) object reference
    '''
    if session is None:
        session = _get_session()
    # Only an unambiguous (single) label match is returned.
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Get XEN resource pool object reference
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        pool_rec = session.xenapi.pool.get_record(pool_ref)
        # Substring match on the label, as in the original implementation.
        if name in pool_rec.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Fires destroying/destroyed events, hard-shuts-down the VM if it is
    not halted, deletes its non-ISO VDIs via destroy_vm_vdis(), destroys
    the VM itself, and removes the salt-cloud cache entries.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard) first; a running VM cannot be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the node from the salt-cloud cache when caching is enabled
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        # Results are keyed by the SR's human-readable label.
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    # Results are keyed by each host's human-readable label.
    for host_ref in session.xenapi.host.get_all():
        host_record = session.xenapi.host.get_record(host_ref)
        ret[host_record['name_label']] = host_record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    # Results are keyed by each pool's human-readable label.
    for pool_ref in session.xenapi.pool.get_all():
        pool_record = session.xenapi.pool.get_record(pool_ref)
        ret[pool_record['name_label']] = pool_record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs)

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        # Results are keyed by PIF uuid.
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed 'rquired' typo in the user-facing message.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    # Reuse the session just opened instead of letting _get_vm create a
    # second one.
    vm = _get_vm(name, session=session)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed 'rquired' typo in the user-facing message.
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    # _get_vm applies the same "exactly one label match" rule as the
    # previous inline get_by_name_label lookup.
    vm = _get_vm(name, session=session)
    if vm is not None:
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            data = {}
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV (paravirtual boot) arguments for a VM.

    ``kwargs`` must contain a ``pv_args`` entry; returns True on success,
    False when ``pv_args`` is missing or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        # XenAPI expects a plain string for PV_args.
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_determine_storage_repo
|
python
|
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
|
Called by create() used to determine storage repo for create
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L533-L550
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): ``salt.cache`` is not in the visible import block at the
    # top of this file -- confirm ``import salt.cache`` exists, otherwise
    # this relies on another salt import having loaded the submodule.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
    '''
    Get an authenticated connection to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  If the configured host turns out to be a pool
    slave, XenAPI raises a failure whose details carry the pool master's
    address, and we retry the login against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # HOST_IS_SLAVE failures report the pool master's address in
        # details[1]; rebuild the URL against the master and retry.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # Bug fix: honor ignore_ssl on the retry as well; previously the
        # fallback session silently dropped it.
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and dom0 (the control domain); only real guests.
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # Narrowed from ``except Exception``: only a missing key is
                # expected here; anything else should surface.
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only report real guests: skip templates and dom0.
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # Start from the raw VM record and overlay the salt-cloud
            # standard fields (id/name/image/size/state/ips).
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a datetime-ish XenAPI type that does not
            # serialize cleanly, so drop it from the output.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Cache the node list under the bare provider name (strip ':driver').
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against the literal 'True';
    # anything else (absent, 'False', etc.) means full detail.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # NOTE(review): 'OpqueRef' key kept as-is (likely a typo for
            # 'OpaqueRef') because callers may depend on it.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Sizes have no standalone meaning in Xen; point users at templates.
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Fixed typo in the error message ('show_instnce').
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Only guests get a node dict; templates/dom0 fall through (returns None).
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # Narrowed from ``except Exception``: only a missing key is
            # expected here.
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo (only used for the copy path below)
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    # Default to cloning when the profile does not say otherwise.
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (fast, same SR) or copying (cross-SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    # Final node dict plus the raw XenAPI record under 'extra'.
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds until a non-APIPA address appears,
    giving up with a warning after ~180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address (169.254/16 self-assigned)
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # start(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: this previously said 'Starting VM'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: this previously said 'Starting VM'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    # A clean reboot is only valid on a running guest.
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by name label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    # Salt Cloud dispatch guard: provider function, not a VM action.
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # Number the VIF records vif-0, vif-1, ... in API order.
        x = 0
        for vif in vifs:
            vif_record = session.xenapi.VIF.get_record(vif)
            data['vif-{}'.format(x)] = vif_record
            x += 1
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # The ``name`` parameter is ignored; the template name must arrive
    # via kwargs (salt-cloud -f passes CLI args that way).
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # Iterating the records dict yields VM opaque references.
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
create
|
python
|
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile
    settings (image, clone, storage_repo, resource_pool, deploy, ...).

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # Announce the creation so reactors/listeners see the request.
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # Connect to the XenServer host.
    session = _get_session()
    # Work out where the VM will live.
    resource_pool = _determine_resource_pool(session, vm_)
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # Clone (fast; same SR) is the default; copy works across SRs.
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # Provision and boot the new VM.
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    start(name, None, session)
    # Re-read the reference after boot.
    vm = _get_vm(name, session)
    # Wait for the guest tools to report an IP, then apply a static IP
    # if the profile defines one.
    _wait_for_ip(name, session)
    _set_static_ip(name, session, vm_)
    # Optionally bootstrap the Salt minion.
    deploy = vm_.get('deploy', True)
    # BUG FIX: log message previously read "delopy".
    log.debug('deploy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
|
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L553-L661
|
[
"def start(name, call=None, session=None):\n '''\n Start a vm\n\n .. code-block:: bash\n\n salt-cloud -a start xenvm01\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n if session is None:\n session = _get_session()\n log.info('Starting VM %s', name)\n vm = _get_vm(name, session)\n task = session.xenapi.Async.VM.start(vm, False, True)\n _run_async_task(task, session)\n return show_instance(name)\n",
"def show_instance(name, session=None, call=None):\n '''\n Show information about a specific VM or template\n\n .. code-block:: bash\n\n salt-cloud -a show_instance xenvm01\n\n .. note:: memory is memory_dynamic_max\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n log.debug('show_instance-> name: %s session: %s', name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record['is_a_template'] and not record['is_control_domain']:\n try:\n base_template_name = record['other_config']['base_template_name']\n except Exception:\n base_template_name = None\n log.debug(\n 'VM %s, doesnt have base_template_name attribute',\n record['name_label']\n )\n ret = {'id': record['uuid'],\n 'image': base_template_name,\n 'name': record['name_label'],\n 'size': record['memory_dynamic_max'],\n 'state': record['power_state'],\n 'private_ips': get_vm_ip(name, session),\n 'public_ips': None}\n\n __utils__['cloud.cache_node'](\n ret,\n __active_provider_name__,\n __opts__\n )\n return ret\n",
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _wait_for_ip(name, session):\n '''\n Wait for IP to be available during create()\n '''\n start_time = datetime.now()\n status = None\n while status is None:\n status = get_vm_ip(name, session)\n if status is not None:\n # ignore APIPA address\n if status.startswith('169'):\n status = None\n check_time = datetime.now()\n delta = check_time - start_time\n log.debug(\n 'Waited %s seconds for %s to report ip address...',\n delta.seconds, name\n )\n if delta.seconds > 180:\n log.warning('Timeout getting IP address')\n break\n time.sleep(5)\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n",
"def _determine_resource_pool(session, vm_):\n '''\n Called by create() used to determine resource pool\n '''\n resource_pool = ''\n if 'resource_pool' in vm_.keys():\n resource_pool = _get_pool(vm_['resource_pool'], session)\n else:\n pool = session.xenapi.pool.get_all()\n if not pool:\n resource_pool = None\n else:\n first_pool = session.xenapi.pool.get_all()[0]\n resource_pool = first_pool\n pool_record = session.xenapi.pool.get_record(resource_pool)\n log.debug('resource pool: %s', pool_record['name_label'])\n return resource_pool\n",
"def _determine_storage_repo(session, resource_pool, vm_):\n '''\n Called by create() used to determine storage repo for create\n '''\n storage_repo = ''\n if 'storage_repo' in vm_.keys():\n storage_repo = _get_sr(vm_['storage_repo'], session)\n else:\n storage_repo = None\n if resource_pool:\n default_sr = session.xenapi.pool.get_default_SR(resource_pool)\n sr_record = session.xenapi.SR.get_record(default_sr)\n log.debug('storage repository: %s', sr_record['name_label'])\n storage_repo = default_sr\n else:\n storage_repo = None\n log.debug('storage repository: %s', storage_repo)\n return storage_repo\n",
"def _clone_vm(image=None, name=None, session=None):\n '''\n Create VM by cloning\n\n This is faster and should be used if source and target are\n in the same storage repository\n\n '''\n if session is None:\n session = _get_session()\n log.debug('Creating VM %s by cloning %s', name, image)\n source = _get_vm(image, session)\n task = session.xenapi.Async.VM.clone(source, name)\n _run_async_task(task, session)\n",
"def _copy_vm(template=None, name=None, session=None, sr=None):\n '''\n Create VM by copy\n\n This is slower and should be used if source and target are\n NOT in the same storage repository\n\n template = object reference\n name = string name of new VM\n session = object reference\n sr = object reference\n '''\n if session is None:\n session = _get_session()\n log.debug('Creating VM %s by copying %s', name, template)\n source = _get_vm(template, session)\n task = session.xenapi.Async.VM.copy(source, name, sr)\n _run_async_task(task, session)\n",
"def _provision_vm(name=None, session=None):\n '''\n Provision vm right after clone/copy\n '''\n if session is None:\n session = _get_session()\n log.info('Provisioning VM %s', name)\n vm = _get_vm(name, session)\n task = session.xenapi.Async.VM.provision(vm)\n _run_async_task(task, session)\n",
"def _set_static_ip(name, session, vm_):\n '''\n Set static IP during create() if defined\n '''\n ipv4_cidr = ''\n ipv4_gw = ''\n if 'ipv4_gw' in vm_.keys():\n log.debug('ipv4_gw is found in keys')\n ipv4_gw = vm_['ipv4_gw']\n if 'ipv4_cidr' in vm_.keys():\n log.debug('ipv4_cidr is found in keys')\n ipv4_cidr = vm_['ipv4_cidr']\n log.debug('attempting to set IP in instance')\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)\n",
"def _deploy_salt_minion(name, session, vm_):\n '''\n Deploy salt minion during create()\n '''\n # Get bootstrap values\n vm_['ssh_host'] = get_vm_ip(name, session)\n vm_['user'] = vm_.get('user', 'root')\n vm_['password'] = vm_.get('password', 'p@ssw0rd!')\n vm_['provider'] = vm_.get('provider', 'xen')\n log.debug('%s has IP of %s', name, vm_['ssh_host'])\n # Bootstrap Salt minion!\n if vm_['ssh_host'] is not None:\n log.info('Installing Salt minion on %s', name)\n boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)\n log.debug('boot return: %s', boot_ret)\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load this driver when a Xen provider is configured and the
    XenAPI SDK is importable.
    '''
    if get_configured_provider() is False or _get_dependencies() is False:
        return False
    # NOTE(review): salt.cache is used here but only salt.utils.cloud is
    # imported at module level — presumably pulled in as a side effect of
    # the salt package import; confirm before relying on it.
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks that the XenAPI SDK module could be imported.
    '''
    return config.check_driver_dependencies(
        __virtualname__, {'XenAPI': HAS_XEN_API}
    )
def get_configured_provider():
    '''
    Return the first configured Xen provider instance
    (requires at least a ``url`` setting).
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',),
    )
def _get_session():
    '''
    Log in to the XenServer host and return an authenticated session.

    If the configured host turns out to be a pool slave, XenAPI reports
    the pool master's address in the failure details; we then retry the
    login against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    provider = get_configured_provider()
    url = config.get_cloud_config_value(
        'url', provider, __opts__, search_global=False
    )
    user = config.get_cloud_config_value(
        'user', provider, __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        'password', provider, __opts__, search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl', provider, __opts__, default=False, search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # HOST_IS_SLAVE failures carry the master's address as the
        # second detail element.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines (templates and the control domain excluded).

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    ret = {}
    for vm in session.xenapi.VM.get_all_records():
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # Older templates may lack the base_template_name marker.
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        label = record['name_label']
        ret[label] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': label,
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(label, session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Return the IP address of a VM, or ``None`` when unavailable.

    First checks the VIFs for a statically configured IPv4 address, then
    falls back to the guest-tools metrics key ``0/ip``.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            # PERF FIX: fetch the address list once instead of making the
            # same remote call twice per VIF.
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if '0/ip' in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net['0/ip']
            )
            ret = net['0/ip']
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Configure a static IPv4 address on a VM's virtual interfaces.

    Applies ``ipv4_cidr``/``ipv4_gw`` in ``static`` mode to every VIF of
    the VM; always returns ``True`` (failures are only logged).
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # TODO: for now will take first interface; additional consideration
    # needed for multiple-interface (vif) VMs.
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            log.debug(session.xenapi.VIF.get_record(vif))
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List all VMs with their full XenAPI records, augmented with the
    salt-cloud standard fields, and refresh the node cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    for vm in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # Deal with VMs lacking the 'base_template_name' marker.
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        vm_cfg = session.xenapi.VM.get_record(vm)
        vm_cfg['id'] = record['uuid']
        vm_cfg['name'] = record['name_label']
        vm_cfg['image'] = base_template_name
        vm_cfg['size'] = None
        vm_cfg['state'] = record['power_state']
        vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
        vm_cfg['public_ips'] = None
        # snapshot_time is a DateTime object and does not serialize.
        vm_cfg.pop('snapshot_time', None)
        ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        provider = provider.split(':')[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with ``-f`` or ``--function`` then it can
    return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # Only the literal CLI string 'True' enables terse output.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # BUG FIX: the key was previously misspelled 'OpqueRef',
            # inconsistent with the non-terse branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented); currently aliases
    the resource-pool listing.

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Sizes are not a separate concept in Xen — they are baked into the
    templates, so this only returns a hint.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    return {'STATUS':
            'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    Each template's full record is returned, showing core counts,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    for vm_ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm_ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    Returns ``None`` (implicitly) when the name resolves to a template
    or the control domain.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously read 'show_instnce'.
            'The show_instance action must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Pick the resource pool for create(): the profile's ``resource_pool``
    when set, otherwise the host's first (usually only) pool.
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Pick the storage repository for create(): the profile's
    ``storage_repo`` when set, otherwise the pool's default SR (or
    ``None`` when there is no pool).
    '''
    if 'storage_repo' in vm_:
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto a freshly created VM during create().
    '''
    # Fill in the connection values salt's cloud.bootstrap expects.
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Only bootstrap when the guest actually reported an address.
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Apply a static IP during create() when the profile defines
    ``ipv4_cidr``/``ipv4_gw``.
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Block until the guest reports an IP address or ~180s have elapsed.

    Link-local APIPA (169.x) addresses count as "no address yet".
    '''
    started = datetime.now()
    address = None
    while address is None:
        address = get_vm_ip(name, session)
        if address is not None and address.startswith('169'):
            # ignore APIPA address
            address = None
        elapsed = datetime.now() - started
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            elapsed.seconds, name
        )
        if elapsed.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Poll an asynchronous XenAPI task to completion, then destroy it.

    Running operations asynchronously avoids synchronous-call timeouts
    on long-running jobs.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Completed tasks must be destroyed explicitly or they accumulate.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning a template.

    Cloning is fast but requires source and target to live in the same
    storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source = _get_vm(image, session)
    _run_async_task(session.xenapi.Async.VM.clone(source, name), session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by full copy.

    Slower than cloning, but works when source and target are NOT in
    the same storage repository.

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source = _get_vm(template, session)
    _run_async_task(session.xenapi.Async.VM.copy(source, name, sr), session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy (instantiates its disks).
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm.

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm.

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm.

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk.

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk.

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`).

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm (clean shutdown via the guest).

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # BUG FIX: log message previously said 'Starting VM'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm (clean reboot; only when currently running).

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # BUG FIX: message previously referenced 'show_instnce'.
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # BUG FIX: log message previously said 'Starting VM'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for sr_ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(sr_ref)
        ret[record['name_label']] = record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        ret[record['name_label']] = record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        ret[record['name_label']] = record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    :param call: must be ``'function'`` (enforced by salt-cloud)
    :returns: dict mapping PIF UUIDs to their full records
    '''
    # NOTE: docstring previously said "Resource Pools" and showed the
    # pool_list example; this function lists PIFs.
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for pif in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :param name: name label of the VM
    :returns: ``{name: {'vif-0': record, ...}}``
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the user-facing message ('rquired' -> 'required')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for x, vif in enumerate(vifs):
            data['vif-{}'.format(x)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :param name: name label of the VM
    :returns: ``{'vbd-0': record, ...}`` or ``{}`` when the name does not
        resolve to exactly one VM
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the user-facing message ('rquired' -> 'required')
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            for x, vbd in enumerate(vbds):
                data['vbd-{}'.format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :param kwargs: must contain ``name``, the template's name label
    :returns: ``{name: {'status': 'destroyed'|'not found'}}``
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    # get_all_records() already returns {ref: record}; using the records
    # directly avoids one extra get_record round-trip per VM.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_deploy_salt_minion
|
python
|
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
|
Deploy salt minion during create()
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L664-L678
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    :returns: dict keyed by VM name label with id/image/name/size/state/ips
    '''
    session = _get_session()
    # get_all_records() already returns {ref: record}; using the records
    # directly avoids one extra get_record round-trip per VM.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm, record in vms.items():
        # skip templates and dom0
        if record['is_a_template'] or record['is_control_domain']:
            continue
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret[record['name_label']] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': record['name_label'],
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(record['name_label'], session),
            'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :param name: name label of the VM
    :param session: optional XenAPI session; a new one is created when None
    :returns: IP address string, or None if no address is known
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            # fetch the address list once instead of calling the API twice
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- fall back to guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :param name: name label of the VM
    :param session: optional XenAPI session; a new one is created when None
    :returns: dict of instance details, or ``{}`` if the name resolves to a
        template or the control domain
    '''
    if call == 'function':
        # fixed 'show_instnce' typo in the error message
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    # initialize so templates/dom0 don't raise UnboundLocalError below
    ret = {}
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once per second, logging progress, until it leaves the
    ``pending`` state, then destroys the server-side task record.

    :param task: XenAPI task opaque reference (no-op when None)
    :param session: XenAPI session (no-op when None)
    :returns: None
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    # XenAPI reports progress as a float in [0.0, 1.0]; scale to percent.
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Task records persist on the server until explicitly destroyed.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance` via shutdown()
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # stop is an alias for a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message previously said 'Starting VM'
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01

    :param name: name label of the VM
    :returns: instance details from :func:`show_instance`, or a message
        string when the VM is not running
    '''
    if call == 'function':
        # error message previously referred to 'show_instnce'
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message previously said 'Starting VM'
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen

    :param call: must be ``'function'`` (enforced by salt-cloud)
    :returns: dict mapping SR name labels to their full records
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for sr in session.xenapi.SR.get_all():
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    :param call: must be ``'function'`` (enforced by salt-cloud)
    :returns: dict mapping PIF UUIDs to their full records
    '''
    # NOTE: docstring previously said "Resource Pools" and showed the
    # pool_list example; this function lists PIFs.
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for pif in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :param name: name label of the VM
    :returns: ``{name: {'vif-0': record, ...}}``
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the user-facing message ('rquired' -> 'required')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for x, vif in enumerate(vifs):
            data['vif-{}'.format(x)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :param name: name label of the VM
    :returns: ``{'vbd-0': record, ...}`` or ``{}`` when the name does not
        resolve to exactly one VM
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the user-facing message ('rquired' -> 'required')
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            for x, vbd in enumerate(vbds):
                data['vbd-{}'.format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :param name: template name label (positional fallback for ``kwargs['name']``)
    :param call: salt-cloud invocation type; must be a function (``-f``)
    :param kwargs: expects ``name`` — the template's name label
    :return: dict mapping the name to a status of 'destroyed' or 'not found'
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Bug fix: the original used kwargs.get('name', None), which silently
    # discarded a name passed via the positional argument. Fall back to it.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # templates are VM records flagged is_a_template; destroy matches
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    # an empty PV_args string is normalized to None
    return session.xenapi.VM.get_PV_args(vm_ref) or None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :param name: name label of the target VM
    :param kwargs: expects ``pv_args`` — the PV boot argument string
    :param session: optional existing XenAPI session
    :param call: salt-cloud invocation type; must be an action (``-a``)
    :return: True on success, False when pv_args is missing or XenAPI fails
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    # Bug fix: validate up front. Previously kwargs defaulted to None and
    # kwargs['pv_args'] raised an uncaught TypeError (only KeyError was
    # handled), crashing instead of returning False.
    if kwargs is None or 'pv_args' not in kwargs:
        log.error('No pv_args parameter found.')
        return False
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_set_static_ip
|
python
|
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined
    '''
    gateway = ''
    cidr = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        gateway = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    # missing keys leave empty strings, matching set_vm_ip's expectations
    set_vm_ip(name, cidr, gateway, session, None)
|
Set static IP during create() if defined
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L681-L694
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # terse only when explicitly passed as the string 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # NOTE(review): 'OpqueRef' key typo is preserved for output
            # compatibility with existing consumers — confirm before fixing.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Get XEN vm instance object reference
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    # only an unambiguous single match is returned; 0 or >1 yields None
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Get XEN sr (storage repo) object reference
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    # only an unambiguous single match is returned; 0 or >1 yields None
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Get XEN resource pool object reference
    '''
    if session is None:
        session = _get_session()
    for candidate in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(candidate)
        # NOTE(review): this is a substring match, mirroring the original
        # behavior — confirm whether exact equality was intended.
        if name in record.get('name_label'):
            return candidate
    return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    repos = {}
    for repo in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(repo)
        repos[record['name_label']] = record
    return repos
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_wait_for_ip
|
python
|
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
|
Wait for IP to be available during create()
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L697-L718
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_run_async_task
|
python
|
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
|
Run XenAPI task in asynchronous mode to prevent timeouts
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L721-L734
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
    '''
    Check whether the XenAPI SDK dependency is available.

    Delegates to Salt's driver dependency checker, which warns when
    XenAPI.py cannot be imported.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'XenAPI': HAS_XEN_API}
    )


def get_configured_provider():
    '''
    Return the first configured provider instance for this driver.

    A provider is considered configured when at least ``url`` is set.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get an authenticated XenAPI session to the XenServer host.

    Reads ``url``, ``user``, ``password`` and the optional ``ignore_ssl``
    flag from the provider configuration. If the initial login fails,
    the address reported in the failure details is treated as the pool
    master and the login is retried against it.

    :returns: an authenticated ``XenAPI.Session`` object
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # assumes the failure is HOST_IS_SLAVE, where details[1] carries
        # the pool master's address -- TODO confirm for other failures
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    :returns: dict keyed by VM name with id, image, name, size, state
        and IP information; templates and the control domain are skipped
    '''
    session = _get_session()
    # get_all_records() already returns {ref: record}; iterate the
    # records directly instead of issuing one extra get_record() call
    # per VM as before.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for record in vms.values():
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # Not every VM carries this attribute; image is unknown.
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :param name: label of the VM to query
    :param session: optional existing XenAPI session (one is created
        when omitted)
    :param call: salt-cloud invocation type; 'function' is rejected
    :returns: the IP address string, or None when no address is known
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses come back in CIDR notation; keep the address
                # part and discard the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # metrics are unavailable e.g. while guest tools are still starting
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param name: label of the VM whose vif(s) are reconfigured
    :param ipv4_cidr: address to assign, in CIDR notation
    :param ipv4_gw: gateway address
    :param session: optional existing XenAPI session
    :param call: salt-cloud invocation type; 'function' is rejected
    :returns: True (failures are logged, not raised)
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       additional consideration needed for
    #       multiple interface (vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F

    :param session: optional existing XenAPI session
    :returns: dict keyed by VM name with the full VM record augmented
        with id/name/image/size/state/IP fields
    '''
    if session is None:
        session = _get_session()
    ret = {}
    for vm in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # deal with cases where the VM doesn't have 'base_template_name'
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        # Reuse the record fetched above instead of issuing a second
        # get_record() round-trip per VM, as the original did.
        vm_cfg = record
        vm_cfg['id'] = record['uuid']
        vm_cfg['name'] = record['name_label']
        vm_cfg['image'] = base_template_name
        vm_cfg['size'] = None
        vm_cfg['state'] = record['power_state']
        vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
        vm_cfg['public_ips'] = None
        # snapshot_time is dropped (presumably not serializable
        # downstream -- original behavior kept)
        if 'snapshot_time' in vm_cfg.keys():
            del vm_cfg['snapshot_time']
        ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    full_listing = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(full_listing, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against 'True' explicitly.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # 'OpqueRef' was a typo for 'OpaqueRef'; the correct key is
            # added while the misspelled one is kept for backward
            # compatibility with existing consumers.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi,
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations.

    Xen has no native notion of locations, so the configured resource
    pools are returned instead.

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    return pool_list()


def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Sizes are baked into templates, so only a status message is
    returned.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Typo fix in the user-facing message: 'build' -> 'built'.
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the full record of each template, which shows the
    number of cores, memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    templates = {}
    session = _get_session()
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # only template records are kept; regular VMs are skipped
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Typo fix in the message: 'show_instnce' -> 'show_instance'.
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # Not every VM carries this attribute; image is unknown.
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() to determine which resource pool to use.

    Uses the profile's ``resource_pool`` when given, otherwise falls
    back to the first pool on the host (or ``None`` when there is none).
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # Single get_all() call; the original issued it twice.
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    # Guard the record lookup: get_record(None) would fail when no
    # pool exists.
    if resource_pool is not None:
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool


def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() to determine the storage repository to use.

    Uses the profile's ``storage_repo`` when given, otherwise the
    resource pool's default SR (or ``None`` when neither is available).
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile
    settings (``vm_``): image, clone, storage_repo, resource_pool,
    deploy, ipv4_cidr/ipv4_gw, etc.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/request dict; must contain at least 'name',
        'profile' and 'driver'
    :returns: show_instance() data for the new VM plus the full VM
        record under 'extra'
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM; cloning is the default when the profile does not say
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (fast) or copying across SRs (slow)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy a Salt minion onto the new VM during create().

    Bootstrap parameters (ssh host/user/password/provider) are filled
    into ``vm_`` with defaults before calling cloud.bootstrap.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)


def _set_static_ip(name, session, vm_):
    '''
    Set a static IP during create() when the profile defines
    ``ipv4_cidr`` (and optionally ``ipv4_gw``).
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
        log.debug('attempting to set IP in instance')
        set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)


def _wait_for_ip(name, session):
    '''
    Poll until the new VM reports an IP address during create().

    Gives up after ~180 seconds; APIPA (169.x) addresses are ignored
    while waiting for a real lease.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning an existing template/VM.

    Cloning is fast and should be preferred when source and target
    live in the same storage repository.

    :param image: name label of the source template/VM
    :param name: name label for the new VM
    :param session: optional existing XenAPI session
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)


def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by copying.

    Copying is slower than cloning and should be used when source and
    target are NOT in the same storage repository.

    :param template: name label of the source template/VM
    :param name: name label for the new VM
    :param session: optional existing XenAPI session
    :param sr: opaque reference of the destination storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)


def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy so it becomes a real
    (non-template) instance.

    :param name: name label of the VM to provision
    :param session: optional existing XenAPI session
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Message fix: previously said 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)


def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)


def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)


def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)


def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)


def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for a clean shutdown)

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)


def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fix: this shuts the VM down, it does not start it.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)


def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fix: this reboots the VM, it does not start it.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Get the XenAPI opaque reference for the VM named *name*.

    :returns: the reference when exactly one VM matches, else None
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None


def _get_sr(name=None, session=None):
    '''
    Get the XenAPI opaque reference for the storage repository *name*.

    :returns: the reference when exactly one SR matches, else None
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None


def _get_pool(name=None, session=None):
    '''
    Get the XenAPI opaque reference for the resource pool whose label
    contains *name*.

    :returns: the first matching pool reference, else None
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        pool_record = session.xenapi.pool.get_record(pool_ref)
        if name in pool_record.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    :param name: label of the VM to destroy
    :param call: salt-cloud invocation type; 'function' is rejected
    :returns: dict with destroyed VDI names under 'vbd' and a
        'destroyed' flag (empty when the VM was not found)
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # the VM must be halted before it can be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    # NOTE(review): this guard (!= 'function') is stricter than the
    # (== 'action') checks used by host_list/pool_list below
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret


def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        ret[host_record['name_label']] = host_record
    return ret


def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pools = session.xenapi.pool.get_all()
    for pool in pools:
        pool_record = session.xenapi.pool.get_record(pool)
        ret[pool_record['name_label']] = pool_record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by uuid.

    (Docstring fixed: it previously described pool_list.)

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fix in the message: 'rquired' -> 'required'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate replaces the manual x counter
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret


def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fix in the message: 'rquired' -> 'required'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
        ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images (templates) from Xen.

    When called with ``--list-images`` it returns every template with
    all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    templates = template_list()
    return templates
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disks (VDIs) attached to a VM.

    Every non-ISO VDI referenced by the VM's VBDs is destroyed.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    :returns: dict of destroyed VDI names keyed 'vdi-N'
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # empty drives reference the NULL opaque ref
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # ISO images are shared media; leave them alone
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :returns: {name: {'status': 'destroyed'}} or
        {name: {'status': 'not found'}}
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Bug fix: fall back to the positional ``name`` argument instead of
    # unconditionally clobbering it with kwargs.get('name', None).
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    :returns: the PV_args string, or None when none is set
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    if pv_args:
        return pv_args
    return None


def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :returns: True on success, False when the pv_args kwarg is missing
        or the XenAPI call fails
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_clone_vm
|
python
|
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
|
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L737-L750
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    Tries the VIF's reported IPv4 address first, then falls back to the
    guest-metrics network map.  Returns ``None`` when no address is found.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                # Strip the prefix length; the subnet part is not needed.
                ret = cidr.split('/')[0]
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        # Guest tools not running yet; the caller polls via _wait_for_ip.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set a static IPv4 address on a VM's virtual interfaces (VIFs).

    Applies the address to every VIF found on the VM.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session)
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vif_refs = session.xenapi.VM.get_VIFs(vm_ref)
    if vif_refs is not None:
        log.debug('There are %s vifs.', len(vif_refs))
        for vif_ref in vif_refs:
            log.debug(session.xenapi.VIF.get_record(vif_ref))
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif_ref, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List virtual machines with their full records.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name'
            try:
                base_template_name = record['other_config']['base_template_name']
            # Narrowed from ``except Exception`` — only a missing key is expected.
            except KeyError:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # Reuse the record already fetched instead of issuing a second
            # identical XenAPI get_record RPC for the same VM.
            vm_cfg = record
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # 'snapshot_time' is a datetime and breaks serialization downstream.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against the literal 'True'.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Bug fix: key was previously misspelled 'OpqueRef' in the
            # terse branch, inconsistent with the full branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented).

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call != 'action':
        # Resource pools are the closest analogue Xen has to a "location".
        return pool_list()
    raise SaltCloudException(
        'The avail_locations function must be called with -f or --function.'
    )
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    .. code-block:: bash

        salt-cloud --list-sizes myxen

    Xen has no standalone size catalogue; sizes come from templates.
    '''
    if call != 'action':
        return {'STATUS':
                'Sizes are build into templates. Consider running --list-images to see sizes'}
    raise SaltCloudException(
        'The avail_sizes function must be called with -f or --function.')
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number cores,
    memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    result = {}
    for ref in session.xenapi.VM.get_all():
        rec = session.xenapi.VM.get_record(ref)
        if not rec['is_a_template']:
            continue
        result[rec['name_label']] = rec
    return result
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Fixed typo in the error message ('show_instnce').
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        # Narrowed from ``except Exception`` — only a missing key is expected.
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Resolve the resource pool to deploy into; helper for create().

    Uses the profile's ``resource_pool`` if given, otherwise the first
    pool reported by the host.
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Resolve the storage repository for create().

    Uses the profile's ``storage_repo`` if given, otherwise the pool's
    default SR, otherwise ``None``.
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # cloning is fast (same SR); copying is slower but works across SRs
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # Fixed typo in the log message ('delopy').
    log.debug('deploy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto the new VM; helper for create().
    '''
    # Fill in bootstrap values, falling back to driver defaults.
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Apply a static IP from profile settings during create(), if defined.
    '''
    ipv4_gw = ''
    ipv4_cidr = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
        log.debug('attempting to set IP in instance')
        set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Poll until the VM reports an IP address via guest tools (helper for
    create()).  Gives up after ~180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore APIPA (link-local) addresses.  Bug fix: the check was
            # ``startswith('169')`` which also discarded valid routable
            # 169.x addresses; APIPA is specifically 169.254.0.0/16.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy.

    This is slower and should be used if source and target are
    NOT in the same storage repository.

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm.

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm.

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm.

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk.

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk.

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm.  Alias for :func:`shutdown`.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm.

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: previously said 'Starting VM'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.  Only running VMs can be cleanly rebooted.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error message that referenced 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: previously said 'Starting VM'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    ret = {}
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # Force the VM down before touching its disks.
        if record['power_state'] != 'Halted':
            _run_async_task(session.xenapi.Async.VM.hard_shutdown(vm), session)
        # Remove the VM's disks (VDIs, found via its VBDs), then the VM.
        ret['vbd'] = destroy_vm_vdis(name, session)
        _run_async_task(session.xenapi.Async.VM.destroy(vm), session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for sr_ref in session.xenapi.SR.get_all():
        rec = session.xenapi.SR.get_record(sr_ref)
        result[rec['name_label']] = rec
    return result
def host_list(call=None):
    '''
    Get a list of Xen Servers.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for host_ref in session.xenapi.host.get_all():
        rec = session.xenapi.host.get_record(host_ref)
        result[rec['name_label']] = rec
    return result
def pool_list(call=None):
    '''
    Get a list of Resource Pools.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for pool_ref in session.xenapi.pool.get_all():
        rec = session.xenapi.pool.get_record(pool_ref)
        result[rec['name_label']] = rec
    return result
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for pif_ref in session.xenapi.PIF.get_all():
        rec = session.xenapi.PIF.get_record(pif_ref)
        result[rec['uuid']] = rec
    return result
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual counter.
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual counter.
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call != 'action':
        # Images in Xen are templates.
        return template_list()
    raise SaltCloudSystemExit(
        'This function must be called with -f, --function argument.'
    )
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM, skipping ISOs.

    Fixed docstring: this function *destroys* the VDIs found via the VM's
    VBDs, it does not merely list them.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    Returns a dict mapping ``vdi-N`` to the destroyed VDI's name label.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block devices (VBDs) to reach the attached VDIs
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            index = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Empty drives (e.g. an ejected CD) have a NULL VDI ref.
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Never delete ISO images shared with other VMs.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(index)] = vdi_record['name_label']
                index += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Bug fix: the ``name`` parameter was previously discarded and always
    # overwritten by kwargs; now kwargs takes precedence but the positional
    # argument is honored when kwargs does not supply a name.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # An empty PV_args string is reported as None.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.  Returns ``True`` on success.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm_ref, str(kwargs['pv_args']))
        return True
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_copy_vm
|
python
|
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
|
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L753-L770
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False or _get_dependencies() is False:
        return False
    # Initialize the module-level cache lazily, once the driver is known
    # to be usable.
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider = __active_provider_name__ or __virtualname__
    # 'url' is the only hard requirement for this driver.
    return config.is_provider_configured(__opts__, provider, ('url',))
def _get_session():
    '''
    Get an authenticated XenAPI session to the configured host.

    If the configured host turns out to be a pool slave, XenAPI raises a
    Failure whose details carry the pool master's address; in that case a
    second login attempt is made against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    provider = get_configured_provider()

    def _cfg(key, **extra):
        # Small helper to avoid repeating the provider/__opts__ plumbing.
        return config.get_cloud_config_value(
            key, provider, __opts__, search_global=False, **extra)

    url = _cfg('url')
    user = _cfg('user')
    password = _cfg('password')
    ignore_ssl = _cfg('ignore_ssl', default=False)
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Slave host: details[1] holds the pool master's address.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines.

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    ret = {}
    for vm in session.xenapi.VM.get_all_records():
        rec = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0).
        if rec['is_a_template'] or rec['is_control_domain']:
            continue
        try:
            base_template_name = rec['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                rec['name_label']
            )
        ret[rec['name_label']] = {
            'id': rec['uuid'],
            'image': base_template_name,
            'name': rec['name_label'],
            'size': rec['memory_dynamic_max'],
            'state': rec['power_state'],
            'private_ips': get_vm_ip(rec['name_label'], session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    address = None
    # First choice: an IPv4 address reported directly on a VIF.
    vif_refs = session.xenapi.VM.get_VIFs(vm_ref)
    if vif_refs is not None:
        for vif_ref in vif_refs:
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif_ref)
            if addresses:
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif_ref).pop()
                address = cidr.split('/')[0]
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, address)
                return address
    # Fallback: the guest-tools metrics network map.
    metrics_ref = session.xenapi.VM.get_guest_metrics(vm_ref)
    try:
        networks = session.xenapi.VM_guest_metrics.get_networks(metrics_ref)
        if "0/ip" in networks.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, networks["0/ip"]
            )
            address = networks["0/ip"]
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return address
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif).
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            log.debug(session.xenapi.VIF.get_record(vif))
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F

    :param session: optional existing XenAPI session (one is created if None)
    :return: dict keyed by VM name label with the full VM record plus
        salt-cloud standard keys (id, image, name, size, state, ips)
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # copy the record we already fetched instead of issuing a
            # second identical XenAPI round-trip for the same VM
            vm_cfg = record.copy()
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # presumably dropped because the XenAPI DateTime value does not
            # serialize cleanly downstream — TODO confirm
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # gather full node data first, then let the generic helper filter
    # it down to the fields requested in the master/minion config
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against the literal 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # bugfix: this key was previously misspelled 'OpqueRef',
            # inconsistent with the non-terse branch below
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # for now, locations simply map onto the available resource pools
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # sizes have no standalone meaning in Xen; they come from templates
    status_msg = 'Sizes are build into templates. Consider running --list-images to see sizes'
    return {'STATUS': status_msg}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    # fetch every VM record once and keep only templates
    records = (
        session.xenapi.VM.get_record(ref)
        for ref in session.xenapi.VM.get_all()
    )
    return {
        rec['name_label']: rec
        for rec in records
        if rec['is_a_template']
    }
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # bugfix: message previously misspelled 'show_instnce'
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    # robustness: ensure a defined return value even for templates or
    # dom0, which previously hit a NameError on 'ret'
    ret = {}
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}

        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    An explicit ``resource_pool`` profile setting wins; otherwise the
    first pool on the host is used, or None when no pool exists.
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            # reuse the list we already fetched instead of issuing a
            # second identical pool.get_all() XenAPI call
            resource_pool = pool[0]
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Resolution order: an explicit ``storage_repo`` profile setting wins;
    otherwise the resource pool's default SR is used; with neither, None.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/VM configuration dict (requires ``name``,
        ``profile``, ``driver``; honors ``image``, ``clone``, ``deploy``,
        ``ipv4_cidr``/``ipv4_gw``, ``resource_pool``, ``storage_repo``)
    :return: show_instance() data for the new VM plus an ``extra`` key
        holding the raw XenAPI VM record
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM; cloning is the default when the profile does not say
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # bugfix: log message previously read 'delopy is set to %s'
    log.debug('deploy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Populates bootstrap connection settings on ``vm_`` (defaults for
    user/password/provider) and invokes salt-cloud's generic bootstrap
    when the VM reports an IP address.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set a static IP during create() when the profile defines one.
    '''
    # both values default to '' when absent from the profile
    cidr = vm_.get('ipv4_cidr', '')
    gateway = vm_.get('ipv4_gw', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, cidr, gateway, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds, discarding link-local (APIPA,
    169.x) addresses, and gives up after roughly 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once a second until it leaves the ``pending`` state,
    logging progress, then removes the finished task record.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # remove the finished task record from the server
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning an existing template/VM.

    This is faster and should be used if source and target are
    in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    # clone asynchronously so long-running copies don't hit API timeouts
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    # run provisioning asynchronously to avoid XenAPI timeouts
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # args: start_paused=False, force=True
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # args: start_paused=False, force=True
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # stop is an alias for a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # bugfix: log message previously read 'Starting VM'
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # bugfix: message previously named 'show_instnce' (copy-paste typo)
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # bugfix: log message previously read 'Starting VM'
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    Hard-stops the VM if it is running, deletes its non-ISO VDIs, then
    destroys the VM record and removes it from the salt-cloud caches.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # fire the destroying event before any work is done
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard, since the VM is being deleted anyway)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)

        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's cachedir bookkeeping
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    # NOTE: unlike the sibling *_list functions, this one uses the
    # stricter `call != 'function'` check
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # map each host's name label to its full record
    records = (
        session.xenapi.host.get_record(ref)
        for ref in session.xenapi.host.get_all()
    )
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # map each pool's name label to its full record
    records = (
        session.xenapi.pool.get_record(ref)
        for ref in session.xenapi.pool.get_all()
    )
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.PIF.get_record(ref)
        for ref in session.xenapi.PIF.get_all()
    )
    return {rec['uuid']: rec for rec in records}
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # bugfix: message previously misspelled 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
        ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # bugfix: message previously misspelled 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed when the name label is unambiguous
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # in Xen, the available "images" are simply the templates
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    ISO VDIs (name label containing 'iso') are preserved. Returns a dict
    mapping ``vdi-N`` to the name label of each destroyed VDI.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed when the name label is unambiguous
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # CD/empty VBDs report a NULL VDI reference
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    .. note:: the ``name`` positional parameter is always overwritten by
        the ``name`` value from ``kwargs`` (None when absent).
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # iterating the records dict yields the VM opaque references
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # empty PV args are reported as None rather than ''
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    Returns True on success; False when ``pv_args`` is missing from
    kwargs or XenAPI rejects the update.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_provision_vm
|
python
|
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
|
Provision vm right after clone/copy
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L773-L782
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False

    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): salt.cache is not imported at the top of this file;
    # this presumably relies on another salt import pulling it in — verify
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks that the XenAPI.py module is importable.
    '''
    dependencies = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, dependencies)
def get_configured_provider():
    '''
    Return the first configured instance.

    A provider block is considered configured when it supplies a ``url``.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``/``user``/``password``/``ignore_ssl`` from the provider
    configuration and logs in. If the first login fails, the address in
    the failure details is treated as the pool master and the login is
    retried against it.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # presumably a HOST_IS_SLAVE error whose details[1] carries the
        # pool master address — TODO confirm against the XenAPI error spec
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    Returns a dict keyed by VM name label with salt-cloud's standard
    fields (id, image, name, size, state, ips). Templates and the
    control domain (dom0) are skipped.
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # some VMs lack 'base_template_name' in other_config
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
unpause
|
python
|
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
|
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L829-L848
|
[
"def show_instance(name, session=None, call=None):\n '''\n Show information about a specific VM or template\n\n .. code-block:: bash\n\n salt-cloud -a show_instance xenvm01\n\n .. note:: memory is memory_dynamic_max\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n log.debug('show_instance-> name: %s session: %s', name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record['is_a_template'] and not record['is_control_domain']:\n try:\n base_template_name = record['other_config']['base_template_name']\n except Exception:\n base_template_name = None\n log.debug(\n 'VM %s, doesnt have base_template_name attribute',\n record['name_label']\n )\n ret = {'id': record['uuid'],\n 'image': base_template_name,\n 'name': record['name_label'],\n 'size': record['memory_dynamic_max'],\n 'state': record['power_state'],\n 'private_ips': get_vm_ip(name, session),\n 'public_ips': None}\n\n __utils__['cloud.cache_node'](\n ret,\n __active_provider_name__,\n __opts__\n )\n return ret\n",
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n",
"def _run_async_task(task=None, session=None):\n '''\n Run XenAPI task in asynchronous mode to prevent timeouts\n '''\n if task is None or session is None:\n return None\n task_name = session.xenapi.task.get_name_label(task)\n log.debug('Running %s', task_name)\n while session.xenapi.task.get_status(task) == 'pending':\n progress = round(session.xenapi.task.get_progress(task), 2) * 100\n log.debug('Task progress %.2f%%', progress)\n time.sleep(1)\n log.debug('Cleaning up task %s', task_name)\n session.xenapi.task.destroy(task)\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented).

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # TODO: need to figure out a good meaning of locations in Xen;
    # resource pools are the closest analogue for now.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Xen has no separate size catalogue: CPU/memory settings are part of
    # the template, so point the user at --list-images instead.
    # (fixes "build into" -> "built into" in the user-facing message)
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number of cores,
    memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    for ref in session.xenapi.VM.get_all():
        rec = session.xenapi.VM.get_record(ref)
        # templates are VM records flagged as is_a_template
        if rec['is_a_template']:
            templates[rec['name_label']] = rec
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # fixed: error message previously misspelled the function name
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # only real guests are reported; templates and dom0 are skipped
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            # VMs created outside salt-cloud may lack this attribute
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}

        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool.

    Uses the profile's ``resource_pool`` name when given; otherwise falls
    back to the first pool the server reports, or None when there is none.
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            # standalone host with no pool configured
            resource_pool = None
        else:
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create.

    Uses the profile's ``storage_repo`` name when given; otherwise uses
    the resource pool's default SR, or None when no pool is available.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile settings.
    ``vm_`` is the profile dict; it must contain ``name``, ``profile`` and
    ``driver``, and may carry ``image``, ``clone``, ``deploy``,
    ``resource_pool``, ``storage_repo``, ``ipv4_cidr`` / ``ipv4_gw`` and
    bootstrap credentials. Returns the ``show_instance`` dict augmented
    with the full VM record under ``extra``.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}

    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )

    # connect to xen
    session = _get_session()

    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)

    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)

    # build VM; clone defaults to True when the profile does not set it
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)

    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # create by cloning template (fast, same SR) or copying (cross-SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)

    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)

    # start vm
    start(name, None, session)

    # get new VM
    vm = _get_vm(name, session)

    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)

    # set static IP if configured
    _set_static_ip(name, session, vm_)

    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create().

    name -- VM name, used for the IP lookup and log messages
    session -- authenticated XenAPI session
    vm_ -- profile dict; mutated in place with the bootstrap connection
           details (ssh_host, user, password, provider) before handoff
           to ``cloud.bootstrap``. Nothing is done when no IP was found.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Assign a static IP during create() when the profile defines one.
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create().

    Polls ``get_vm_ip`` every 5 seconds until the guest tools report a
    usable address, giving up after roughly 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            # NOTE(review): the '169' prefix check also rejects any
            # non-link-local 169.x.x.x address, not just 169.254.0.0/16
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts.

    Polls the task once per second until it leaves the ``pending`` state,
    then destroys the task record to release it on the server. Returns
    None immediately when either argument is missing.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        # progress is reported by the server as a 0..1 float
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM named ``name`` by cloning ``image``.

    Cloning is faster than copying and should be used when source and
    target are in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by copying a template into a storage repository.

    Copying is slower than cloning and should be used when source and
    target are NOT in the same storage repository.

    template -- object reference of the source template
    name -- string name of the new VM
    session -- XenAPI session object reference
    sr -- storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a vm right after clone/copy.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm.

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The start function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm.

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The pause function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk.

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The suspend function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk.

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The resume function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The stop function must be called with -a or --action.'
        )
    # stop is an alias for a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm.

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The shutdown function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed: log message previously said 'Starting VM'
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.

    Only a running VM can be cleanly rebooted; otherwise a message
    string is returned.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # fixed: error message previously named show_instnce
        raise SaltCloudException(
            'The reboot function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed: log message previously said 'Starting VM'
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy a Xen VM or template instance.

    Fires destroying/destroyed cloud events, hard-stops the VM when it is
    not already halted, deletes its non-ISO virtual disks, destroys the
    VM itself and cleans the salt-cloud cache entries.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    # nothing is returned when the VM cannot be found
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)

        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by name_label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for sr_ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(sr_ref)
        ret[record['name_label']] = record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by name_label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    # NOTE(review): guards only against call == 'action', unlike
    # sr_list/pif_list which require call == 'function'; behavior kept.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        ret[record['name_label']] = record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by name_label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        ret[record['name_label']] = record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    ret = {}
    for pif_ref in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif_ref)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vif definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed: message previously misspelled 'rquired'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            vif_record = session.xenapi.VIF.get_record(vif)
            data['vif-{}'.format(index)] = vif_record
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs (virtual block devices) on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed: message previously misspelled 'rquired'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed on an unambiguous name match
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                vbd_record = session.xenapi.VBD.get_record(vbd)
                data['vbd-{}'.format(index)] = vbd_record
        ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with ``--list-images`` this returns images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Templates are the Xen equivalent of cloud images.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO VDIs (virtual disks) attached to a VM's VBDs.

    Returns a dict of ``vdi-N`` -> destroyed disk name_label entries.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object; only proceed on an unambiguous name match
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # skip empty drives (e.g. a CD drive with no disc)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # ISO images are shared media, never destroyed here
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    The template name is read from ``kwargs['name']``; returns a dict of
    ``{name: {'status': 'destroyed' | 'not found'}}``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already maps refs to record dicts, so the
    # original per-VM get_record() round trip is unnecessary.
    for ref, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(ref)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM.

    Returns None when the VM has no PV args set.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.

    Reads the value from ``kwargs['pv_args']``; returns True on success,
    False when the parameter is missing or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm_ref, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
resume
|
python
|
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
|
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L873-L892
|
[
"def show_instance(name, session=None, call=None):\n '''\n Show information about a specific VM or template\n\n .. code-block:: bash\n\n salt-cloud -a show_instance xenvm01\n\n .. note:: memory is memory_dynamic_max\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n log.debug('show_instance-> name: %s session: %s', name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record['is_a_template'] and not record['is_control_domain']:\n try:\n base_template_name = record['other_config']['base_template_name']\n except Exception:\n base_template_name = None\n log.debug(\n 'VM %s, doesnt have base_template_name attribute',\n record['name_label']\n )\n ret = {'id': record['uuid'],\n 'image': base_template_name,\n 'name': record['name_label'],\n 'size': record['memory_dynamic_max'],\n 'state': record['power_state'],\n 'private_ips': get_vm_ip(name, session),\n 'public_ips': None}\n\n __utils__['cloud.cache_node'](\n ret,\n __active_provider_name__,\n __opts__\n )\n return ret\n",
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n",
"def _run_async_task(task=None, session=None):\n '''\n Run XenAPI task in asynchronous mode to prevent timeouts\n '''\n if task is None or session is None:\n return None\n task_name = session.xenapi.task.get_name_label(task)\n log.debug('Running %s', task_name)\n while session.xenapi.task.get_status(task) == 'pending':\n progress = round(session.xenapi.task.get_progress(task), 2) * 100\n log.debug('Task progress %.2f%%', progress)\n time.sleep(1)\n log.debug('Cleaning up task %s', task_name)\n session.xenapi.task.destroy(task)\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # salt.cache is not imported at module level (only salt.config and
    # salt.utils.cloud are); import it here so the Cache() lookup below
    # cannot fail with an AttributeError.
    import salt.cache  # pylint: disable=import-outside-toplevel
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance (requires the ``url`` option).
    '''
    active = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, active, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads url/user/password/ignore_ssl from the provider configuration
    and logs in via XenAPI. When the configured host is a pool slave,
    the raised XenAPI.Failure carries the pool master's address and the
    login is retried against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # details[1] of a HOST_IS_SLAVE failure holds the master address
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # NOTE(review): the failover session does not pass ignore_ssl —
        # looks like an oversight; confirm before relying on it.
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines.

    Returns a dict keyed by VM name with id/image/name/size/state/
    private_ips/public_ips; templates and the control domain (dom0)
    are excluded.

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    # get_all_records() maps refs to records; iterating yields the refs,
    # which are then re-fetched below via get_record()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                # VMs created outside salt-cloud may lack this attribute
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    First tries the VIF-reported IPv4 addresses, then falls back to the
    guest-metrics network map. Returns None when neither source has an
    address.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses are CIDR strings; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # metrics are unavailable until the guest tools have started
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set a static IPv4 address on a virtual interface (vif).

    name -- VM name
    ipv4_cidr -- address in CIDR notation (e.g. ``10.0.0.215/24``)
    ipv4_gw -- gateway address
    Always returns True; configuration failures are only logged.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines.

    Returns the complete VM record for each guest (templates and dom0
    excluded), augmented with the salt-cloud standard fields, and pushes
    the result into the node-list cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a XenAPI datetime that does not serialize
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # strip any driver suffix from the active provider name
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(), __opts__['query.selection'], call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # the CLI hands the flag through as the literal string 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # fixed key typo 'OpqueRef' -> 'OpaqueRef' so the terse output
            # matches the spelling used in the full output below
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented).

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        msg = 'The avail_locations function must be called with -f or --function.'
        raise SaltCloudException(msg)
    # resource pools are the closest analogue to a "location"
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Sizes are not a separate concept in Xen; they are baked into the
    templates, so this only returns an informational status message.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # fixed typo in the status message: 'build' -> 'built'
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number of cores,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    # templates are regular VM records flagged with 'is_a_template'
    for vm_ref in session.xenapi.VM.get_all():
        rec = session.xenapi.VM.get_record(vm_ref)
        if rec['is_a_template']:
            templates[rec['name_label']] = rec
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # fixed typo in the error message: 'show_instnce' -> 'show_instance'
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # initialize so a template/control-domain name no longer raises
    # UnboundLocalError on the final return
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() to determine the resource pool to place the VM in.

    Uses the profile's ``resource_pool`` when given, otherwise falls back
    to the first pool reported by the host (or None when there is none).
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # fetch the pool list once instead of calling get_all() twice
        pools = session.xenapi.pool.get_all()
        if not pools:
            resource_pool = None
        else:
            resource_pool = pools[0]
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() to determine the storage repository for the new VM.

    Prefers the profile's ``storage_repo``; otherwise falls back to the
    resource pool's default SR, or None when neither is available.
    '''
    if 'storage_repo' in vm_:
        # explicit SR named in the profile wins
        return _get_sr(vm_['storage_repo'], session)
    if resource_pool:
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile settings
    (``image``, ``clone``, ``storage_repo``, ``resource_pool``, ``deploy``,
    static-IP settings, etc.).

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM; cloning is the default (fast path, same SR)
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (or full copy into the target SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools (polls up to ~180s)
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # NOTE(review): 'delopy' typo in the debug message below is preserved
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto a freshly created VM (called by create()).
    '''
    # fill in bootstrap connection details, falling back to defaults
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['provider'] = vm_.get('provider', 'xen')
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Assign a static IP during create() when the profile defines one.
    '''
    ipv4_gw = vm_.get('ipv4_gw', '')
    ipv4_cidr = vm_.get('ipv4_cidr', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for the guest tools to report an IP address during create().

    Polls every 5 seconds for up to ~180 seconds; link-local (APIPA)
    addresses are ignored while waiting for a real lease.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore only APIPA link-local addresses (169.254.0.0/16);
            # the previous prefix check ('169') wrongly discarded valid
            # routable addresses such as 169.1.x.x
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Poll a XenAPI asynchronous task until it completes, then destroy it.

    Running tasks asynchronously avoids XenAPI call timeouts on
    long-running operations.
    '''
    if task is None or session is None:
        return None
    label = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', label)
    while True:
        if session.xenapi.task.get_status(task) != 'pending':
            break
        pct = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', pct)
        time.sleep(1)
    # tasks are not garbage-collected automatically; clean up explicitly
    log.debug('Cleaning up task %s', label)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning an existing template/VM.

    Cloning is faster than copying and should be used when source and
    target are in the same storage repository.
    '''
    session = _get_session() if session is None else session
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by full copy into a storage repository.

    Slower than cloning; use when source and target are NOT in the same
    storage repository.

    template
        object reference of the source template/VM
    name
        string name of the new VM
    session
        XenAPI session object reference
    sr
        object reference of the target storage repository
    '''
    session = _get_session() if session is None else session
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy.
    '''
    session = _get_session() if session is None else session
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm.

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.start(vm, start_paused, force)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm.

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    Unpause a vm.

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk.

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # stopping is implemented as a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm.

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed misleading log message (previously 'Starting VM')
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.

    Only a running VM can be cleanly rebooted; otherwise a message is
    returned instead.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # fixed copy/paste error: the message referenced 'show_instnce'
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed misleading log message (previously 'Starting VM')
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy a Xen VM: hard-shutdown if running, delete its non-ISO VDIs,
    then destroy the VM itself.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    # reuse the session just opened instead of letting _get_vm() log in again
    vm = _get_vm(name, session)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by SR label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by host label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    hosts = {}
    for host_ref in session.xenapi.host.get_all():
        rec = session.xenapi.host.get_record(host_ref)
        hosts[rec['name_label']] = rec
    return hosts
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by pool label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    pools = {}
    for pool_ref in session.xenapi.pool.get_all():
        rec = session.xenapi.pool.get_record(pool_ref)
        pools[rec['name_label']] = rec
    return pools
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by PIF uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vif definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    # pass the session along instead of opening a second one
    vm = _get_vm(name, session)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        x = 0
        for vif in vifs:
            vif_record = session.xenapi.VIF.get_record(vif)
            data['vif-{}'.format(x)] = vif_record
            x += 1
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                data['vbd-{}'.format(x)] = vbd_record
                x += 1
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are templates, so delegate to template_list()
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM.

    Skips any VDI whose label contains 'iso' so installation media that
    may be shared between VMs is not removed.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD with no VDI attached
                # (e.g. an empty CD drive)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already returns {ref: record}; use the records
    # directly instead of re-fetching each one with get_record()
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV (paravirtual boot) arguments for a VM.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # normalize an empty string to None
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV (paravirtual boot) arguments for a VM.

    Returns True on success, False when the ``pv_args`` kwarg is missing
    or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        pv_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', pv_args)
    try:
        session.xenapi.VM.set_PV_args(vm, str(pv_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
stop
|
python
|
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
|
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L895-L909
|
[
"def shutdown(name, call=None, session=None):\n '''\n Shutdown a vm\n\n .. code-block:: bash\n\n salt-cloud -a shutdown xenvm01\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n if session is None:\n session = _get_session()\n log.info('Starting VM %s', name)\n vm = _get_vm(name, session)\n task = session.xenapi.Async.VM.shutdown(vm)\n _run_async_task(task, session)\n return show_instance(name)\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.cache
import salt.config as config
from salt.ext import six

# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
    SaltCloudSystemExit,
    SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # initialize the module-level cache used by the driver
    # NOTE(review): relies on salt.cache being importable; no
    # 'import salt.cache' is visible in this module's import block — confirm
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider = __active_provider_name__ or __virtualname__
    # 'url' is the only required provider setting
    return config.is_provider_configured(__opts__, provider, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host.

    If the configured host turns out to be a pool slave, XenAPI reports
    the pool master's address in the failure details and the login is
    retried against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # host is a pool slave; details[1] carries the master's address
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # honor ignore_ssl on the retry as well (it was previously dropped)
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines.

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    # get_all_records() already returns {ref: record}; avoid an extra
    # get_record() round trip per VM
    for vm, record in vms.items():
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            # fetch once instead of querying the API twice per VIF
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set a static IPv4 address on the VM's virtual interfaces (vifs)
    via the XenAPI ``VIF.configure_ipv4`` call.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    # NOTE(review): returns True even when configure_ipv4 failed — confirm intended
    return True
def list_nodes_full(session=None):
    '''
    List all non-template, non-control-domain virtual machines with their
    full XenAPI records.

    session
        Optional existing XenAPI session; a new one is opened when omitted.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # reuse the record already fetched above instead of issuing a
            # second identical XenAPI round trip for the same VM
            vm_cfg = dict(record)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # 'snapshot_time' does not serialize cleanly, so drop it
            if 'snapshot_time' in vm_cfg:
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    # gather full node data, then filter it down to the configured fields
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # the CLI hands the flag through as the literal string 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # fixed key typo 'OpqueRef' -> 'OpaqueRef' so the terse output
            # matches the spelling used in the full output below
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented).

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        msg = 'The avail_locations function must be called with -f or --function.'
        raise SaltCloudException(msg)
    # resource pools are the closest analogue to a "location"
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Sizes are not a separate concept in Xen; they are baked into the
    templates, so this only returns an informational status message.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # fixed typo in the status message: 'build' -> 'built'
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number of cores,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    # templates are regular VM records flagged with 'is_a_template'
    for vm_ref in session.xenapi.VM.get_all():
        rec = session.xenapi.VM.get_record(vm_ref)
        if rec['is_a_template']:
            templates[rec['name_label']] = rec
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    Returns a dict with id, image, name, size, state and IPs for a
    regular guest, or an empty dict for templates / the control domain.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        raise SaltCloudException(
            'The show_instance action must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Bug fix: ``ret`` was only bound inside the branch below, so querying
    # a template or the control domain raised UnboundLocalError.
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() to determine the resource pool to use.

    Uses the profile's ``resource_pool`` setting when present, otherwise
    the first pool on the host, or None when no pool exists.
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    # Bug fix: only look up the record when a pool was found; the old
    # unconditional get_record() call failed on a None reference.
    if resource_pool is not None:
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create
    '''
    # An explicit profile setting wins; otherwise fall back to the pool's
    # default SR, or None when no resource pool is available.
    if 'storage_repo' in vm_:
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.
    Fires the standard salt-cloud lifecycle events (creating / requesting /
    created), clones or copies the template, provisions and starts the VM,
    waits for the guest to report an IP, optionally assigns a static IP
    and bootstraps a Salt minion.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    # clone defaults to True; cloning is faster but requires source and
    # target to live in the same storage repository
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto the new VM during create().
    '''
    # Fill in bootstrap parameters, keeping any values the profile set.
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_.setdefault('user', 'root')
    vm_.setdefault('password', 'p@ssw0rd!')
    vm_.setdefault('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Only attempt bootstrap once the guest has reported an IP address.
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined in the profile.
    '''
    # Missing settings fall back to empty strings, matching set_vm_ip's
    # expectations.
    ipv4_cidr = vm_.get('ipv4_cidr', '')
    ipv4_gw = vm_.get('ipv4_gw', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for the guest to report an IP address during create().

    Polls every 5 seconds, ignoring APIPA (169.x) self-assigned
    addresses, and gives up after roughly 180 seconds.
    '''
    start_time = datetime.now()
    while True:
        status = get_vm_ip(name, session)
        # ignore APIPA address
        if status is not None and status.startswith('169'):
            status = None
        if status is not None:
            # Improvement: return immediately instead of sleeping one
            # more 5-second interval after the IP is already known.
            return
        delta = datetime.now() - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            return
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log text: this previously logged "Starting VM".
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    # Fixed copy-pasted error message that referenced "show_instnce".
    if call == 'function':
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log text: this previously logged "Starting VM".
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    # A clean reboot is only possible while the guest is running.
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Hard-shuts the VM down if it is not already halted, destroys its
    non-ISO virtual disks, destroys the VM record, fires the salt-cloud
    destroy events and removes the minion from the cloud cache.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by name label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by host name label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    return {
        rec['name_label']: rec
        for rec in (session.xenapi.host.get_record(h)
                    for h in session.xenapi.host.get_all())
    }
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by pool name label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    return {
        rec['name_label']: rec
        for rec in (session.xenapi.pool.get_record(p)
                    for p in session.xenapi.pool.get_all())
    }
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed "rquired" typo in the returned message.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual counter
        for x, vif in enumerate(vifs):
            data['vif-{}'.format(x)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed "rquired" typo in the returned message.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only proceed when the name resolves to exactly one VM
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual counter
            for x, vbd in enumerate(vbds):
                data['vbd-{}'.format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are simply templates.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the VDIs (virtual disks) attached to a VM, except ISO images.

    Returns a mapping of ``vdi-N`` keys to the name labels of the
    destroyed disks.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # VBDs with no backing disk (e.g. empty CD drives) point
                # at the NULL reference; skip those.
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # keep ISO images; only data disks are destroyed
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    The template name is passed via ``name=`` in ``kwargs``.  Returns a
    status dict keyed by the name: ``destroyed`` or ``not found``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # iterate all VM refs; templates are VM records flagged is_a_template
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    Returns the PV_args string, or None when it is empty.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # an empty PV_args string is normalized to None
    return pv_args or None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    Expects ``pv_args`` in ``kwargs``.  Returns True on success, False
    when the parameter is missing or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
reboot
|
python
|
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
|
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L934-L957
|
[
"def show_instance(name, session=None, call=None):\n '''\n Show information about a specific VM or template\n\n .. code-block:: bash\n\n salt-cloud -a show_instance xenvm01\n\n .. note:: memory is memory_dynamic_max\n\n '''\n if call == 'function':\n raise SaltCloudException(\n 'The show_instnce function must be called with -a or --action.'\n )\n log.debug('show_instance-> name: %s session: %s', name, session)\n if session is None:\n session = _get_session()\n vm = _get_vm(name, session=session)\n record = session.xenapi.VM.get_record(vm)\n if not record['is_a_template'] and not record['is_control_domain']:\n try:\n base_template_name = record['other_config']['base_template_name']\n except Exception:\n base_template_name = None\n log.debug(\n 'VM %s, doesnt have base_template_name attribute',\n record['name_label']\n )\n ret = {'id': record['uuid'],\n 'image': base_template_name,\n 'name': record['name_label'],\n 'size': record['memory_dynamic_max'],\n 'state': record['power_state'],\n 'private_ips': get_vm_ip(name, session),\n 'public_ips': None}\n\n __utils__['cloud.cache_node'](\n ret,\n __active_provider_name__,\n __opts__\n )\n return ret\n",
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n",
"def _run_async_task(task=None, session=None):\n '''\n Run XenAPI task in asynchronous mode to prevent timeouts\n '''\n if task is None or session is None:\n return None\n task_name = session.xenapi.task.get_name_label(task)\n log.debug('Running %s', task_name)\n while session.xenapi.task.get_status(task) == 'pending':\n progress = round(session.xenapi.task.get_progress(task), 2) * 100\n log.debug('Task progress %.2f%%', progress)\n time.sleep(1)\n log.debug('Cleaning up task %s', task_name)\n session.xenapi.task.destroy(task)\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # Bug fix: salt.cache is never imported at module level, so
    # salt.cache.Cache could fail with AttributeError unless something
    # else had already imported salt.cache into the ``salt`` namespace.
    import salt.cache
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module via the generic driver-dependency
    helper; returns its result.
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'XenAPI': HAS_XEN_API}
    )
def get_configured_provider():
    '''
    Return the first configured instance.

    Requires at least ``url`` to be present in the provider config.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  When the initial login fails, retries
    against the pool master address carried in the failure details.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Assumes the failure details carry the pool master's address
        # (HOST_IS_SLAVE shape) -- TODO confirm; other failures would
        # produce a nonsense retry URL.
        # NOTE(review): the retry session does not pass ignore_ssl;
        # presumably an oversight -- confirm before changing.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only real guests are reported: skip templates and dom0.
        if record['is_a_template'] or record['is_control_domain']:
            continue
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret[record['name_label']] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': record['name_label'],
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(record['name_label'], session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    Tries the VIF-reported IPv4 address first, then falls back to the
    guest-metrics network map.  Returns None when no address is known.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses are CIDR strings; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # metrics may be unavailable until guest tools come up
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    Configures static IPv4 addressing on every VIF of the VM.  Always
    returns True, even when individual configure calls fail.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                # best-effort: keep going if a VIF rejects the config
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    Returns the complete VM record for every guest (templates and the
    control domain are excluded), augmented with the standard salt-cloud
    fields, and refreshes the node cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a non-serializable XenAPI datetime; drop it
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # strip any ":driver" suffix from the provider name before caching
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    The fields returned are taken from the ``query.selection`` option.

    .. code-block:: bash

        salt-cloud -S
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    # kwargs arrive as strings from the CLI, hence the comparison with
    # the literal 'True' rather than a boolean.
    if kwargs is not None:
        if 'terse' in kwargs:
            if kwargs['terse'] == 'True':
                terse = True
            else:
                terse = False
        else:
            terse = False
    else:
        kwargs = {}
        terse = False
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        log.debug(type(terse))
        if terse is True:
            # NOTE: 'OpqueRef' (sic) is kept as-is; renaming the key
            # would break consumers of the terse output.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest Xen analogue to "locations".
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Xen has no separate size concept; sizing (cores, memory) is part of
    the template, so this only returns a pointer to ``--list-images``.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Fixed typo in the user-facing message ("build" -> "built").
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number cores,
    memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    vm_refs = session.xenapi.VM.get_all()
    records = (session.xenapi.VM.get_record(ref) for ref in vm_refs)
    # Only VM records flagged as templates are included, keyed by name.
    return {rec['name_label']: rec for rec in records if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: ``size`` reports the VM's ``memory_dynamic_max``.
    '''
    if call == 'function':
        # Fixed 'show_instnce' typo in the error message.
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Templates and dom0 are not instances; implicitly returns None.
    if not record['is_a_template'] and not record['is_control_domain']:
        # Not every VM carries 'base_template_name' in other_config.
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() to determine the resource pool to use.

    Uses the profile's ``resource_pool`` when given, otherwise falls
    back to the first pool known to the host (``None`` when no pool
    exists at all).
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # Fixed: the original called pool.get_all() twice; reuse the
        # first result instead of a second API round-trip.
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() to determine the storage repository to use.

    Prefers the profile's ``storage_repo``; otherwise falls back to the
    resource pool's default SR, or ``None`` when no pool is available.
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile
    settings.  Fires the standard salt-cloud lifecycle events, builds
    the VM by clone or copy of a template, waits for an IP, optionally
    sets a static IP and bootstraps a Salt minion.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        # Default to clone (fast path, same SR) rather than full copy.
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (or full cross-SR copy)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    # Attach the raw XenAPI record for callers that need full detail.
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy a Salt minion onto the new VM during create().

    Fills in bootstrap connection values (falling back to root and a
    default password when the profile does not provide them) and hands
    off to the generic cloud bootstrap helper.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    # Skipped entirely when the guest never reported an address.
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Apply a static IPv4 address during create() when the profile
    provides ``ipv4_cidr``/``ipv4_gw``; otherwise passes empty strings
    through to set_vm_ip().
    '''
    ipv4_gw = vm_.get('ipv4_gw', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    ipv4_cidr = vm_.get('ipv4_cidr', '')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait (up to ~180 seconds, polling every 5s) for the guest tools to
    report an IP address during create().

    Link-local (APIPA) addresses are ignored so the loop keeps polling
    until a routable address shows up or the timeout hits.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Fixed: only 169.254.0.0/16 is link-local; the old bare
            # '169' prefix wrongly rejected routable addresses such as
            # 169.1.2.3.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run a XenAPI task in asynchronous mode to prevent timeouts.

    Polls the task once per second until it leaves the 'pending' state,
    logging progress, then destroys the task record.  Returns ``None``.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        # Progress is reported by XenAPI as a 0..1 float.
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Task records persist server-side until explicitly destroyed.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning an existing template/VM.

    Cloning is faster than a full copy and should be used when source
    and target live in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by full copy.

    Slower than cloning; use when source and target are NOT in the same
    storage repository.

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after clone/copy.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.start(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # ``stop`` is an alias for a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to show_instance.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: this shuts the VM down, it does not start it.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    Hard-stops the VM if it is running, deletes its non-ISO VDIs,
    destroys the VM record and purges the salt-cloud caches for the
    minion.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    # If the VM cannot be found nothing is destroyed and an empty dict
    # is returned implicitly (falls off the end of the function).
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        # Keyed by the SR's human-readable name label.
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    host_refs = session.xenapi.host.get_all()
    records = (session.xenapi.host.get_record(ref) for ref in host_refs)
    # Keyed by each host's name label.
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    pool_refs = session.xenapi.pool.get_all()
    records = (session.xenapi.pool.get_record(ref) for ref in pool_refs)
    # Keyed by each pool's name label.
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs) on the host.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        # Keyed by the PIF's UUID.
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vif definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed 'rquired' typo in the user-facing message.
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    return {name: data}
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed 'rquired' typo in the user-facing message.
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only an unambiguous single match is inspected; otherwise {}.
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # In Xen, templates ARE the images.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO VDIs attached to a VM (found via its VBDs).

    Used by destroy() to remove a VM's disks before deleting the VM
    itself.  Returns a dict of the destroyed VDI name labels.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Empty VBDs (e.g. an ejected CD drive) have a NULL VDI.
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Skip ISO images (install media); only real disks
                    # are destroyed.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    The template name is read from ``kwargs['name']`` (the positional
    ``name`` parameter is kept for interface compatibility but is
    overridden, matching the original behavior).

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    ret = {}
    found = False
    # get_all_records() already returns {ref: record}; use the records
    # directly instead of re-fetching each one with VM.get_record().
    for vm_ref, record in session.xenapi.VM.get_all_records().items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm_ref)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # Empty PV args are reported as None rather than ''.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    Returns ``True`` on success, ``False`` when ``pv_args`` is missing
    or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        # NOTE(review): kwargs=None raises TypeError here (not caught by
        # the KeyError handler) -- presumably the CLI always passes a
        # dict; confirm before relying on direct calls with kwargs=None.
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_get_vm
|
python
|
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
|
Get XEN vm instance object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L960-L969
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): ``salt.cache`` is not imported at the top of this
    # file; this relies on it being available as an attribute of the
    # already-imported ``salt`` package -- confirm, or add
    # ``import salt.cache`` explicitly.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.
    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # 'url' is the only hard requirement for this driver's provider
    # configuration; user/password are validated at login time.
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get an authenticated XenAPI session to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  On login failure the code retries against
    the address found in the failure details (presumably the pool
    master when the configured host is a slave).
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # NOTE(review): assumes the failure is HOST_IS_SLAVE and that
        # details[1] carries the pool master's address -- TODO confirm;
        # other failures (e.g. bad credentials) would build a bogus URL
        # here and fail again on the retry login.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Templates and the control domain (dom0) are not guests.
        if not record['is_a_template'] and not record['is_control_domain']:
            # Not every VM carries 'base_template_name' in other_config.
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses come back in CIDR form; strip the prefix
                # and return the first one found.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # '0/ip' is the guest tools' key for the first NIC's address.
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    Configures a static IPv4 address; failures per-interface are logged
    but do not abort.  Always returns ``True``.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Templates and the control domain (dom0) are not guests.
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # Start from the raw XenAPI record, then overlay the
            # standard salt-cloud node fields.
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # 'snapshot_time' is a datetime-like value that does not
            # serialize for the node cache; drop it.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` setting when present, otherwise
    falls back to the first pool reported by the host (or None).
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # Fetch the pool list once (previously fetched twice).
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    if resource_pool is not None:
        # Guard added: get_record() on a None/unresolved pool would raise.
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create
    '''
    if 'storage_repo' in vm_.keys():
        # An explicit profile setting wins.
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        # Otherwise use the resource pool's default SR.
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    :param vm_: profile dict; reads 'name', 'profile', 'driver', 'image',
        'clone', 'deploy' and (via helpers) 'resource_pool', 'storage_repo',
        'ipv4_cidr', 'ipv4_gw'
    :return: dict of instance details plus the raw VM record under 'extra'

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        # cloning is the default creation strategy
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template; copy is used when the source and target
    # are on different storage repositories
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()
    '''
    # Populate the bootstrap connection details, applying defaults where
    # the profile did not supply them.
    vm_['ssh_host'] = get_vm_ip(name, session)
    for key, fallback in (('user', 'root'),
                          ('password', 'p@ssw0rd!'),
                          ('provider', 'xen')):
        vm_[key] = vm_.get(key, fallback)
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    if vm_['ssh_host'] is None:
        # Guest tools reported no address; nothing to bootstrap.
        return
    log.info('Installing Salt minion on %s', name)
    result = __utils__['cloud.bootstrap'](vm_, __opts__)
    log.debug('boot return: %s', result)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined
    '''
    # Default to empty strings so set_vm_ip() is always invoked; the XenAPI
    # call simply fails (and is logged) when nothing was configured.
    ipv4_gw = vm_.get('ipv4_gw', '')
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
    ipv4_cidr = vm_.get('ipv4_cidr', '')
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls every 5 seconds, giving up after roughly 180 seconds.
    '''
    started = datetime.now()
    ip_addr = None
    while ip_addr is None:
        ip_addr = get_vm_ip(name, session)
        # A 169.x address is link-local (APIPA): treat it as "no lease yet".
        if ip_addr is not None and ip_addr.startswith('169'):
            ip_addr = None
        elapsed = datetime.now() - started
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            elapsed.seconds, name
        )
        if elapsed.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    # Poll once per second until the task leaves the 'pending' state.
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Tasks must be destroyed explicitly once finished.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    # Clone asynchronously and block until the task completes.
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    # Copy asynchronously into the target SR and block until done.
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    # Provision asynchronously and wait for the task to finish.
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # start(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stopping is implemented as a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading log message (previously said 'Starting VM').
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Fixed copy/paste error: message previously named "show_instnce".
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading log message (previously said 'Starting VM').
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    # Only a running VM can take a clean reboot.
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_sr(name=None, session=None):
    '''
    Get XEN sr (storage repo) object reference
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    # Only an unambiguous (single) name match is returned.
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Get XEN resource pool object reference
    '''
    if session is None:
        session = _get_session()
    # Substring match against each pool's display name; first hit wins.
    return next(
        (ref for ref in session.xenapi.pool.get_all()
         if name in session.xenapi.pool.get_record(ref).get('name_label')),
        None,
    )
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Hard-stops the VM if it is not already halted, removes its non-ISO
    VDIs, destroys the VM itself and cleans up the salt-cloud cache.

    :param name: name label of the VM to destroy
    :param call: salt-cloud dispatch mode; must not be 'function'
    :return: dict with 'vbd' (destroyed VDI names) and 'destroyed' flag;
        empty dict when the VM was not found

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard power-off; no graceful shutdown is attempted)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from the salt-cloud cache directories.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    :param call: salt-cloud dispatch mode; must be 'function'
    :return: dict of SR records keyed by the SR's display name

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # One record per host, keyed by the host's display name.
    records = (session.xenapi.host.get_record(ref)
               for ref in session.xenapi.host.get_all())
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    # One record per pool, keyed by the pool's display name.
    records = (session.xenapi.pool.get_record(ref)
               for ref in session.xenapi.pool.get_all())
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs)

    :param call: salt-cloud dispatch mode; must be 'function'
    :return: dict of PIF records keyed by UUID

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        # One entry per PIF, keyed by its UUID.
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces  on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fix: message previously said "rquired".
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual counter.
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fix: message previously said "rquired".
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed on an unambiguous (single) name match.
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual counter.
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are simply the available templates.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM

    Destroys every VDI attached to the named VM, except ISOs (judged by
    'iso' appearing in the VDI's name label).

    :param name: name label of the VM whose disks are destroyed
    :param session: optional existing XenAPI session
    :return: dict mapping 'vdi-N' to the destroyed VDI name labels

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Skip VBDs with no backing VDI (e.g. empty CD drives).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Keep ISO images; destroy everything else.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    :param name: name label of the template (may also be given via kwargs)
    :param kwargs: salt-cloud kwargs; 'name' here takes precedence
    :return: dict of {name: {'status': 'destroyed' | 'not found'}}

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Bug fix: keep a positionally supplied name instead of discarding it
    # when the 'name' kwarg is absent (was kwargs.get('name', None)).
    name = kwargs.get('name', name)
    session = _get_session()
    # get_all_records() maps VM refs to their records; reuse the record we
    # already have instead of a second get_record() round-trip per VM.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for ref, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(ref)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # Normalize an empty value to None for the caller.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    # The kwarg is mandatory; report a clear error when it is missing.
    try:
        pv_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', pv_args)
    try:
        session.xenapi.VM.set_PV_args(vm_ref, str(pv_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_get_sr
|
python
|
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
|
Get XEN sr (storage repo) object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L972-L981
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): this file only visibly imports salt.config /
    # salt.utils.cloud; confirm salt.cache resolves here (or add
    # ``import salt.cache`` at the top of the file).
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    # HAS_XEN_API was set at import time based on whether XenAPI imported.
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    # 'url' is the only strictly required provider setting.
    return config.is_provider_configured(__opts__, provider_name, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  If the first login fails, the failure details
    are used to retry against the pool master.

    :return: an authenticated ``XenAPI.Session``
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # NOTE(review): assumes the failure is HOST_IS_SLAVE, whose
        # details[1] carries the pool master's address -- confirm.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        # Rebuild the URL keeping the original scheme but the master's host.
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    # get_all_records() maps VM refs to records; reuse the record instead
    # of a second get_record() round-trip per VM.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for ref, record in vms.items():
        # Skip templates and the control domain (dom0).
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # 'base_template_name' is optional metadata; a missing key simply
        # means the image is unknown (narrowed from bare Exception).
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret[record['name_label']] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': record['name_label'],
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(record['name_label'], session),
            'public_ips': None,
        }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    Tries the VIF's reported IPv4 addresses first, then falls back to the
    guest-metrics network map.

    :param name: name label of the VM
    :param session: optional existing XenAPI session
    :return: IPv4 address string, or None if no address could be found

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses come back CIDR-style; strip the prefix length.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # '0/ip' is the first interface's address in the metrics map.
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        # Metrics are unavailable until the guest tools report in.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param name: name label of the VM
    :param ipv4_cidr: address in CIDR form, e.g. ``10.0.0.215/24``
    :param ipv4_gw: IPv4 gateway address
    :param session: optional existing XenAPI session
    :return: always True; failures are only logged
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface; additional consideration
    #       needed for multiple-interface (vif) VMs -- note the loop below
    #       actually configures EVERY vif with the same address.
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                # Best-effort: e.g. empty/invalid CIDR or unsupported guest.
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template'] or record['is_control_domain']:
            continue
        # deal with cases where the VM doesn't have 'base_template_name'
        # attribute (narrowed from bare Exception to KeyError)
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        # Perf fix: reuse the record already fetched instead of issuing a
        # second VM.get_record() round-trip per VM.
        vm_cfg = dict(record)
        vm_cfg['id'] = record['uuid']
        vm_cfg['name'] = record['name_label']
        vm_cfg['image'] = base_template_name
        vm_cfg['size'] = None
        vm_cfg['state'] = record['power_state']
        vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
        vm_cfg['public_ips'] = None
        # Drop 'snapshot_time' as the original did.
        vm_cfg.pop('snapshot_time', None)
        ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    Fields are taken from the ``query.selection`` option.

    .. code-block:: bash

        salt-cloud -S
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    # kwargs arrive from the CLI as strings, so 'terse' is compared to the
    # literal string 'True' (preserves original semantics).
    terse = kwargs is not None and kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Bug fix: key was misspelled 'OpqueRef'; now consistent with
            # the 'OpaqueRef' key used in the full listing below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Locations have no Xen equivalent yet; resource pools stand in.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    Sizes are not a separate concept in Xen; they are baked into templates.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Typo fix: "build" -> "built" in the user-facing status message.
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    # Fetch every VM record once and keep only those flagged as templates,
    # keyed by their display name.
    records = (session.xenapi.VM.get_record(ref)
               for ref in session.xenapi.VM.get_all())
    return {rec['name_label']: rec for rec in records if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    :param name: name label of the VM to inspect
    :param session: optional existing XenAPI session
    :param call: salt-cloud dispatch mode; must not be 'function'
    :return: dict of instance details (empty for templates/control domains)

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Fixed typo: message previously said "show_instnce".
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Bug fix: ret was previously unbound (UnboundLocalError) when the name
    # resolved to a template or the control domain.
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        # 'base_template_name' is optional metadata; a missing key simply
        # means the image is unknown.
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` setting when present, otherwise
    falls back to the first pool reported by the host (or None).
    '''
    if 'resource_pool' in vm_:
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # Fetch the pool list once (previously fetched twice).
        pools = session.xenapi.pool.get_all()
        resource_pool = pools[0] if pools else None
    if resource_pool is not None:
        # Guard added: get_record() on a None/unresolved pool would raise.
        pool_record = session.xenapi.pool.get_record(resource_pool)
        log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create
    '''
    if 'storage_repo' in vm_.keys():
        # An explicit profile setting wins.
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        # Otherwise use the resource pool's default SR.
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    :param vm_: profile dict; reads 'name', 'profile', 'driver', 'image',
        'clone', 'deploy' and (via helpers) 'resource_pool', 'storage_repo',
        'ipv4_cidr', 'ipv4_gw'
    :return: dict of instance details plus the raw VM record under 'extra'

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        # cloning is the default creation strategy
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template; copy is used when the source and target
    # are on different storage repositories
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Bootstrap a Salt minion onto a freshly created VM (helper for
    ``create()``).

    :param name: name of the VM to provision
    :param session: authenticated XenAPI session
    :param vm_: profile/VM configuration dict; mutated in place with the
        connection details that ``cloud.bootstrap`` expects
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    # Only attempt the bootstrap when the guest actually reported an IP.
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Assign a static IP to the new VM during ``create()`` when the
    profile defines ``ipv4_cidr``/``ipv4_gw``.
    '''
    gateway = ''
    cidr = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        gateway = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, cidr, gateway, session, None)
def _wait_for_ip(name, session):
    '''
    Block until the new VM reports an IP address via the guest tools
    (helper for ``create()``).

    Polls every 5 seconds and gives up after roughly 180 seconds.
    APIPA self-assigned addresses (169.x) are treated as "no IP yet".
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            # Give up after ~3 minutes; the caller proceeds without an IP.
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning an existing template.

    Cloning is faster than copying and should be used when the source
    template and the new VM live in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by copying a template into a storage repository.

    Copying is slower than cloning and should be used when the source
    template and the target are NOT in the same storage repository.

    :param template: template object reference
    :param name: string name of the new VM
    :param session: XenAPI session; a new one is opened when omitted
    :param sr: storage repository object reference
    '''
    session = _get_session() if session is None else session
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM immediately after it has been cloned or copied.
    '''
    session = _get_session() if session is None else session
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash
        salt-cloud -a start xenvm01

    :param name: name of the VM to start
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the started VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Async start; positional args are (start_paused=False, force=True).
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash
        salt-cloud -a pause xenvm01

    :param name: name of the VM to pause
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the paused VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash
        salt-cloud -a unpause xenvm01

    :param name: name of the VM to unpause
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the unpaused VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash
        salt-cloud -a suspend xenvm01

    :param name: name of the VM to suspend
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the suspended VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash
        salt-cloud -a resume xenvm01

    :param name: name of the VM to resume
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the resumed VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Async resume; positional args are (start_paused=False, force=True).
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`)

    .. code-block:: bash
        salt-cloud -a stop xenvm01

    :param name: name of the VM to stop
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the stopped VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Cleanly shut down a vm

    .. code-block:: bash
        salt-cloud -a shutdown xenvm01

    :param name: name of the VM to shut down
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data for the halted VM
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading copy-pasted log message ('Starting VM').
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash
        salt-cloud -a reboot xenvm01

    :param name: name of the VM to reboot
    :param call: salt-cloud invocation mode; must be an action (``-a``)
    :param session: XenAPI session; a new one is opened when omitted
    :returns: ``show_instance()`` data, or a message string when the VM
        is not running
    '''
    if call == 'function':
        # Fixed copy-pasted error text that referred to 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed misleading copy-pasted log message ('Starting VM').
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        # clean_reboot requires a running guest with tools installed.
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Fires the destroying/destroyed cloud events, hard-halts the VM if
    needed, deletes its non-ISO VDIs, destroys the VM record, and
    cleans up the salt-cloud cache entries.

    .. code-block:: bash
        salt-cloud -d xenvm01

    :param name: name of the VM to destroy
    :param call: salt-cloud invocation mode
    :returns: dict with ``vbd`` (deleted disk labels) and ``destroyed``
        keys; empty dict when the VM was not found
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce the destroy so reactors/listeners can act on it.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down
        # The VM must be halted before VM.destroy will succeed.
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from salt-cloud's cache bookkeeping.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by SR ``name_label``.

    .. code-block:: bash
        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        # Key the result on the SR's human-readable label.
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by host ``name_label``.

    .. code-block:: bash
        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    hosts = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        hosts[record['name_label']] = record
    return hosts
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by pool ``name_label``.

    .. code-block:: bash
        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    pools = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        pools[record['name_label']] = record
    return pools
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs) on the Xen host,
    keyed by PIF UUID.

    .. code-block:: bash
        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash
        salt-cloud -a vif_list xenvm01

    :param name: name of the VM whose VIFs to list
    :returns: ``{name: {'vif-0': record, ...}}``, or an error string
        when *name* is missing
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the user-facing message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # Number the interfaces in API order: vif-0, vif-1, ...
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash
        salt-cloud -a vbd_list xenvm01

    :param name: name of the VM whose VBDs to list
    :returns: ``{'vbd-0': record, ...}`` (empty when the VM is not
        found), or an error string when *name* is missing
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the user-facing message ('rquired').
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed on an unambiguous name match.
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            # Number the block devices in API order: vbd-0, vbd-1, ...
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
    return data
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash
        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are simply templates; delegate to template_list().
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO virtual disk images (VDIs) attached to a VM.

    Used by ``destroy()`` to delete the VM's disks before the VM record
    itself is removed. ISO VDIs (by name_label) are left untouched.

    .. code-block:: bash
        salt-cloud -a destroy_vm_vdis xenvm01

    :returns: dict mapping ``vdi-N`` keys to the destroyed VDI labels
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Skip empty drives (no VDI attached).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Only destroy real disks, never mounted ISOs.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash
        salt-cloud -f destroy_template myxen name=testvm2

    :param name: template name (also accepted as the ``name=`` kwarg)
    :returns: ``{name: {'status': 'destroyed'}}`` or
        ``{name: {'status': 'not found'}}``
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Previously a positional ``name`` was unconditionally clobbered by
    # the kwarg lookup; fall back to it when ``name=`` is not supplied.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only templates are eligible; never destroy a live VM here.
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash
        salt-cloud -a get_pv_args xenvm01

    :returns: the PV args string, or ``None`` when the VM has none set
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash
        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :returns: ``True`` on success, ``False`` when ``pv_args`` is
        missing or the hypervisor rejects the update
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # kwargs did not contain 'pv_args'.
        # NOTE(review): kwargs=None raises TypeError here, uncaught.
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        # The hypervisor rejected the update; report rather than raise.
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
_get_pool
|
python
|
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
|
Get XEN resource pool object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L984-L995
| null |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
    '''
    Open and authenticate a XenAPI session using the provider config
    (``url``, ``user``, ``password``, ``ignore_ssl``).

    If login against the configured host fails, the failure details are
    assumed to carry the pool master's address and the login is retried
    against the master.

    :returns: an authenticated ``XenAPI.Session``
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # assumes a HOST_IS_SLAVE failure, where details[1] carries the
        # pool master's address -- TODO confirm for other failure kinds.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash
        salt-cloud -Q

    :returns: dict keyed by VM name with id/image/name/size/state/
        private_ips/public_ips summary fields
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the dom0 control domain -- guests only.
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                # Best effort: not every VM carries base_template_name.
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    Tries the VIF-reported IPv4 address first, then falls back to the
    guest-tools metrics (``0/ip``).

    .. code-block:: bash
        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :returns: the first IPv4 address found, or ``None``
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # assumes the address is CIDR formatted ('a.b.c.d/nn');
                # a bare address would raise ValueError here -- confirm.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # Guest metrics are unavailable until the tools report in.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash
        salt-cloud -f vdi_list myxen terse=True

    :param kwargs: may contain ``terse`` (the CLI passes it as the
        string ``'True'``)
    :returns: dict keyed by VDI ``name_label``
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # ``terse`` arrives from the CLI as a string, not a bool.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse is True:
            # Fixed key typo: terse output used to say 'OpqueRef' while
            # the full listing said 'OpaqueRef'.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Determine the storage repository for the new VM (helper for
    ``create()``).

    Prefers the profile's ``storage_repo`` setting; otherwise falls
    back to the resource pool's default SR, or ``None`` when there is
    no pool.

    :param session: authenticated XenAPI session
    :param resource_pool: pool opaque reference (may be ``None``)
    :param vm_: profile/VM configuration dict
    :returns: SR opaque reference or ``None``
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            # Use the pool's default SR when none is configured.
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # stopping is implemented as a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # was logged as 'Starting VM' -- copy/paste error
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # was 'The show_instnce function ...' -- copy/paste from show_instance
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # was logged as 'Starting VM' -- copy/paste error
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Get XEN vm instance object reference
    '''
    if session is None:
        session = _get_session()
    # name labels are not unique in Xen; only return an unambiguous match
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Get XEN sr (storage repo) object reference
    '''
    if session is None:
        session = _get_session()
    # only return an unambiguous match on the SR name label
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01

    :param name: name label of the VM to destroy
    :param call: salt-cloud invocation type; must not be ``'function'``
    :returns: dict describing what was removed (``vbd`` entries and
        ``destroyed: True``) when the VM existed
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down -- hard_shutdown because the VM is going away anyway
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's local cache bookkeeping
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = {}
    for sr_ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(sr_ref)
        records[record['name_label']] = record
    return records
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    # only -a/--action is rejected; plain python calls (call=None) stay
    # allowed so other functions in this driver can reuse this helper
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    hosts = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        hosts[record['name_label']] = record
    return hosts
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    # only -a/--action is rejected; avail_locations() calls this with
    # call=None, so the guard must not use `call != 'function'`
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    pools = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        pools[record['name_label']] = record
    return pools
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs) on the Xen host,
    keyed by PIF uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    # docstring previously said 'Resource Pools' with a pool_list example --
    # copy/paste from pool_list
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual counter
        for x, vif in enumerate(vifs):
            data['vif-{}'.format(x)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    # only proceed on an unambiguous name match
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual counter
            for x, vbd in enumerate(vbds):
                data['vbd-{}'.format(x)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # images in Xen are just templates
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM

    Destroys every non-ISO VDI attached to the named VM and returns a
    mapping of ``vdi-N`` to the VDI name labels that were examined.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # skip VBDs with no backing VDI (e.g. empty drives)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # spare VDIs whose name label contains 'iso'
                    # (e.g. attached install media)
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                    # NOTE(review): the entry is recorded for every non-null
                    # VDI, including spared ISOs -- confirm that is intended
                    ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    # get_all_records() already returns {ref: record}; iterating its items
    # avoids the second get_record() round-trip per VM the original made
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # empty PV args are reported as None
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        new_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', new_args)
    try:
        session.xenapi.VM.set_PV_args(vm, str(new_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
destroy
|
python
|
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
|
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L998-L1053
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n",
"def _run_async_task(task=None, session=None):\n '''\n Run XenAPI task in asynchronous mode to prevent timeouts\n '''\n if task is None or session is None:\n return None\n task_name = session.xenapi.task.get_name_label(task)\n log.debug('Running %s', task_name)\n while session.xenapi.task.get_status(task) == 'pending':\n progress = round(session.xenapi.task.get_progress(task), 2) * 100\n log.debug('Task progress %.2f%%', progress)\n time.sleep(1)\n log.debug('Cleaning up task %s', task_name)\n session.xenapi.task.destroy(task)\n",
"def destroy_vm_vdis(name=None, session=None, call=None):\n '''\n Get virtual block devices on VM\n\n .. code-block:: bash\n\n salt-cloud -a destroy_vm_vdis xenvm01\n\n '''\n if session is None:\n session = _get_session()\n ret = {}\n # get vm object\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n # read virtual block device (vdb)\n vbds = session.xenapi.VM.get_VBDs(vms[0])\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n if vbd_record['VDI'] != 'OpaqueRef:NULL':\n # read vdi on vdb\n vdi_record = session.xenapi.VDI.get_record(\n vbd_record['VDI'])\n if 'iso' not in vdi_record['name_label']:\n session.xenapi.VDI.destroy(vbd_record['VDI'])\n ret['vdi-{}'.format(x)] = vdi_record['name_label']\n x += 1\n return ret\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # salt.cache is not imported at module scope; import it locally so the
    # loader does not depend on another salt import having pulled it in
    import salt.cache
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # 'url' is the only required provider key for this driver
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',),
    )
def _get_session():
    '''
    Get a connection to the XenServer host

    Logs in with the provider-configured url/user/password. If the first
    login fails, the failure details are treated as a pool-master redirect
    (details[1] is presumed to be the master's address -- TODO confirm the
    failure code) and the login is retried against that host.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # honor ignore_ssl on the retry too (it was dropped before)
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    # get_all_records() already returns {ref: record}; no need for a
    # second get_record() round-trip per VM
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for record in vms.values():
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # narrowed from bare 'except Exception'; only the dict
                # lookup can fail here
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                # drop the /prefix; the unused 'subnet' local is gone
                ret = cidr.split('/')[0]
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- fall back to the guest-tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net:
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param name: name label of the target VM
    :param ipv4_cidr: address in CIDR notation (e.g. ``10.0.0.215/24``)
    :param ipv4_gw: default gateway address
    :param session: optional existing XenAPI session
    :param call: salt-cloud invocation type; must not be ``'function'``
    :returns: always ``True`` -- XenAPI failures are only logged
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        # NOTE(review): despite the TODO above, this loop applies the same
        # address to EVERY vif on the VM, not just the first -- confirm
        # this is intended for multi-NIC guests
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # narrowed from bare 'except Exception'
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # annotate the record with salt-cloud's standard fields; the
            # original fetched an identical second copy with get_record()
            vm_cfg = record
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            if 'snapshot_time' in vm_cfg:
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    return salt.utils.cloud.list_nodes_select(
        nodes, __opts__['query.selection'], call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # the CLI delivers the flag as the string 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # key was previously misspelled 'OpqueRef' in this branch only
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # resource pools are the closest analogue for now
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Xen has no standalone size flavors; capacity comes from the template.
    # (fixed 'build' -> 'built' typo in the status message)
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    for ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # fixed 'show_instnce' typo in the message
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # initialize up-front: previously this was only bound inside the `if`,
    # so templates/control domains raised NameError when building `ret`
    base_template_name = None
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # narrowed from bare 'except Exception'
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
    ret = {'id': record['uuid'],
           'image': base_template_name,
           'name': record['name_label'],
           'size': record['memory_dynamic_max'],
           'state': record['power_state'],
           'private_ips': get_vm_ip(name, session),
           'public_ips': None}
    __utils__['cloud.cache_node'](
        ret,
        __active_provider_name__,
        __opts__
    )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` when set; otherwise falls back to
    the first pool on the host, or None when no pool exists.
    '''
    if 'resource_pool' in vm_.keys():
        return _get_pool(vm_['resource_pool'], session)
    # fall back to the first pool, avoiding the duplicate get_all() call
    pools = session.xenapi.pool.get_all()
    if not pools:
        return None
    resource_pool = pools[0]
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` when set; otherwise the resource
    pool's default SR; otherwise None.
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        # collapsed the redundant nested None assignments
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/provider configuration dict; must contain at least
        ``name``, ``profile`` and ``driver``
    :returns: dict describing the new instance plus the raw VM record
        under ``extra``
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    # clone defaults to True when the profile does not set it
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (fast, same SR) or copying (cross-SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # NOTE(review): 'delopy' typo in this log message text
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()
    '''
    # fill in bootstrap settings, falling back to driver defaults
    defaults = {'user': 'root', 'password': 'p@ssw0rd!', 'provider': 'xen'}
    for key, fallback in defaults.items():
        vm_[key] = vm_.get(key, fallback)
    vm_['ssh_host'] = get_vm_ip(name, session)
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    # only attempt static assignment when the profile configured it;
    # previously set_vm_ip was always called, even with empty strings
    if ipv4_cidr or ipv4_gw:
        log.debug('attempting to set IP in instance')
        set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls ``get_vm_ip`` every 5 seconds until the guest reports a usable
    address or 180 seconds have elapsed (requires guest tools in the VM).

    name -- name label of the VM to poll
    session -- authenticated XenAPI session object
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore link-local APIPA addresses (169.254.0.0/16, RFC 3927).
            # The previous check of startswith('169') also discarded valid
            # routable 169.x.y.z addresses outside the APIPA block.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning
    This is faster and should be used if source and target are
    in the same storage repository

    image -- name label of the source template
    name -- name label for the new VM
    session -- XenAPI session; a new one is opened when None
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source = _get_vm(image, session)
    # Clone asynchronously and poll the task to avoid XenAPI call timeouts.
    task = session.xenapi.Async.VM.clone(source, name)
    _run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy
    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference (source template/VM)
    name = string name of new VM
    session = object reference (XenAPI session; opened when None)
    sr = object reference (target storage repository)
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source = _get_vm(template, session)
    # Copy asynchronously and poll the task to avoid XenAPI call timeouts.
    task = session.xenapi.Async.VM.copy(source, name, sr)
    _run_async_task(task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy

    name -- VM name label to provision
    session -- XenAPI session; a new one is opened when None
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.provision(vm)
    _run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm
    .. code-block:: bash
        salt-cloud -a start xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm
    .. code-block:: bash
        salt-cloud -a pause xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm
    .. code-block:: bash
        salt-cloud -a unpause xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk
    .. code-block:: bash
        salt-cloud -a suspend xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk
    .. code-block:: bash
        salt-cloud -a resume xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm
    .. code-block:: bash
        salt-cloud -a stop xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stop is an alias for a clean shutdown.
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm
    .. code-block:: bash
        salt-cloud -a shutdown xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fixed: this shuts the VM down, it does not start it.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm
    .. code-block:: bash
        salt-cloud -a reboot xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None

    Only a running VM can be rebooted; otherwise a message string is
    returned instead of the instance view.
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fixed: this reboots the VM, it does not start it.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def sr_list(call=None):
    '''
    Get a list of storage repositories
    .. code-block:: bash
        salt-cloud -f sr_list myxen

    Returns a dict of full SR records keyed by SR name label.
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers
    .. code-block:: bash
        salt-cloud -f host_list myxen

    Returns a dict of full host records keyed by host name label.
    '''
    # NOTE(review): this guard only rejects call == 'action', while
    # sr_list requires call == 'function' -- confirm the looser check
    # is intentional.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        ret[host_record['name_label']] = host_record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools
    .. code-block:: bash
        salt-cloud -f pool_list myxen

    Returns a dict of full pool records keyed by pool name label.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pools = session.xenapi.pool.get_all()
    for pool in pools:
        pool_record = session.xenapi.pool.get_record(pool)
        ret[pool_record['name_label']] = pool_record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs).
    (Docstring previously copy-pasted from pool_list.)
    .. code-block:: bash
        salt-cloud -f pif_list myxen

    Returns a dict of full PIF records keyed by PIF uuid.
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM
    **requires**: the name of the vm with the vif definition
    .. code-block:: bash
        salt-cloud -a vif_list xenvm01

    Returns ``{name: {'vif-0': <record>, ...}}``.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fixed in returned message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual x counter.
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM
    **requires**: the name of the vm with the vbd definition
    .. code-block:: bash
        salt-cloud -a vbd_list xenvm01

    Returns ``{'vbd-0': <record>, ...}`` or an empty dict when the VM is
    missing or ambiguous.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Typo fixed in returned message ('rquired').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            # enumerate() replaces the manual x counter.
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = \
                    session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen
    If called with the `--list-images` then it returns
    images with all details.
    .. code-block:: bash
        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # In Xen, "images" are templates; delegate to template_list().
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disks (VDIs) attached to a VM, skipping ISO VDIs.
    (Docstring previously said "Get virtual block devices"; the code
    destroys the underlying VDIs.)
    .. code-block:: bash
        salt-cloud -a destroy_vm_vdis xenvm01

    Returns a dict mapping 'vdi-N' to the name label of each destroyed VDI.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD without a backing VDI
                # (e.g. an empty CD drive).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Keep ISO images attached to the VM; destroy the rest.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance
    .. code-block:: bash
        salt-cloud -f destroy_template myxen name=testvm2

    The template name is read from ``kwargs['name']``; the positional
    ``name`` parameter is kept for interface compatibility but overridden.

    Returns ``{name: {'status': 'destroyed'}}`` or
    ``{name: {'status': 'not found'}}``.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already returns {ref: record}; iterate the pairs
    # instead of re-fetching each record with VM.get_record().
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM
    .. code-block:: bash
        salt-cloud -a get_pv_args xenvm01

    Returns the PV_args string, or None when the VM has none set.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    if pv_args:
        return pv_args
    return None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM
    .. code-block:: bash
        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    Returns True on success, False when ``pv_args`` is missing from
    kwargs or the XenAPI call fails.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # Caller did not supply pv_args=... on the command line.
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
sr_list
|
python
|
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
|
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1056-L1075
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.cache
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
    SaltCloudSystemExit,
    SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.

    Also initializes the module-level ``cache`` object used by the driver.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): requires the ``salt.cache`` submodule to be importable;
    # no explicit ``import salt.cache`` is visible in this module --
    # verify it is imported (directly or transitively).
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.
    Checks for the XenAPI.py module

    Returns the result of the driver dependency check (falsy when the
    XenAPI SDK could not be imported).
    '''
    return config.check_driver_dependencies(
        __virtualname__,
        {'XenAPI': HAS_XEN_API}
    )
def get_configured_provider():
    '''
    Return the first configured instance.

    Requires at least the ``url`` key in the provider configuration.
    '''
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    configured provider and logs in via the XenAPI SDK.  If the initial
    login fails (e.g. the target is a pool slave), the address carried in
    the XenAPI failure details is used to retry against the pool master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Failure details[1] carries the pool master's address; rebuild
        # the URL (scheme://master) and retry the login there.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # NOTE(review): the retry session is created without ignore_ssl --
        # confirm whether that is intentional.
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines
    .. code-block:: bash
        salt-cloud -Q

    Returns a dict keyed by VM name label with the standard salt-cloud
    query fields (id, image, name, size, state, private_ips, public_ips).
    Templates and the control domain (dom0) are excluded.
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # Not every VM carries base_template_name in other_config.
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM
    .. code-block:: bash
        salt-cloud -a get_vm_ip xenvm01
    .. note:: Requires xen guest tools to be installed in VM

    Tries the VIF's reported IPv4 addresses first, then falls back to the
    guest-metrics network map.  Returns the address string or None.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses are CIDR strings; strip the prefix length.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # "0/ip" is the first NIC's address as reported by guest tools.
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    name -- VM name label
    ipv4_cidr -- address in CIDR form (e.g. 10.0.0.215/24)
    ipv4_gw -- gateway address
    session -- XenAPI session; a new one is opened when None

    Always returns True; XenAPI failures are logged, not raised.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines
    .. code-block:: bash
        salt-cloud -F

    Returns the complete XenAPI VM record per VM (templates and dom0
    excluded), augmented with the standard salt-cloud fields, and updates
    the salt-cloud node cache.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is not serializable for the cloud cache.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Strip any driver suffix (e.g. 'myxen:xen') before caching.
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances
    .. code-block:: bash
        salt-cloud -S

    Filters the full node list down to the fields configured under
    ``query.selection``.
    '''
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images
    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.
    .. code-block:: bash
        salt-cloud -f vdi_list myxen terse=True

    Returns a dict keyed by VDI name label; terse mode includes only the
    uuid and opaque reference.
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # The CLI delivers terse as the string 'True'; anything else is False.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Key spelling fixed: was 'OpqueRef', inconsistent with the
            # 'OpaqueRef' key used in the non-terse branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)
    .. code-block:: bash
        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest analogue to locations for now.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions
    .. code-block:: bash
        salt-cloud --list-sizes myxen

    Xen has no standalone size concept; sizes come from templates, so a
    status message pointing at ``--list-images`` is returned instead.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Grammar fixed in user-facing message ('are build' -> 'are built').
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.
    This returns the details of
    each template to show number cores, memory sizes, etc..
    .. code-block:: bash
        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    # Keep only records flagged as templates, keyed by name label.
    for ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template
    .. code-block:: bash
        salt-cloud -a show_instance xenvm01
    .. note:: memory is memory_dynamic_max

    Builds the standard salt-cloud node dict and pushes it into the
    salt-cloud node cache before returning it.
    '''
    if call == 'function':
        raise SaltCloudException(
            'The show_instnce function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        # Not every VM carries base_template_name in other_config.
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` when set; otherwise falls back to
    the first pool on the host (or None when there is none).
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pools = session.xenapi.pool.get_all()
        # Reuse the list already fetched instead of issuing a second
        # pool.get_all() round trip for the first element.
        resource_pool = pools[0] if pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` when set; otherwise falls back to
    the resource pool's default SR, or None when no pool is available.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen
    The configuration for this function is read from the profile settings.
    .. code-block:: bash
        salt-cloud -p some_profile xenvm01

    vm_ -- profile/VM definition dict supplied by salt-cloud

    Returns the show_instance() view of the new VM with the raw XenAPI
    record attached under 'extra'.
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (fast path, same SR) or full copy
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # Log-message typo fixed ('delopy' -> 'deploy').
    log.debug('deploy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy a Salt minion onto a freshly created VM (called from create()).

    name -- VM name label
    session -- authenticated XenAPI session
    vm_ -- profile/VM definition dict; mutated in place with bootstrap keys
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    # NOTE(review): falls back to a hard-coded default password when the
    # profile does not define one -- confirm this is intended.
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    # Only attempt the bootstrap when the VM actually reported an address.
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Apply a static IPv4 address during create() when the profile defines
    ``ipv4_cidr`` / ``ipv4_gw``.  Missing keys default to empty strings.
    '''
    cidr = ''
    gateway = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        gateway = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, cidr, gateway, session, None)
def _wait_for_ip(name, session):
    '''
    Block until the VM reports a usable IP address during create().

    Polls get_vm_ip() every 5 seconds, discarding link-local (APIPA)
    addresses, and gives up after 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Ignore self-assigned APIPA addresses, which live in
            # 169.254.0.0/16.  (Checking only '169' wrongly rejected valid
            # routable 169.x.y.z addresses.)
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning
    This is faster and should be used if source and target are
    in the same storage repository

    image -- name label of the source template
    name -- name label for the new VM
    session -- XenAPI session; a new one is opened when None
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source = _get_vm(image, session)
    # Clone asynchronously and poll the task to avoid XenAPI call timeouts.
    task = session.xenapi.Async.VM.clone(source, name)
    _run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy
    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference (source template/VM)
    name = string name of new VM
    session = object reference (XenAPI session; opened when None)
    sr = object reference (target storage repository)
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source = _get_vm(template, session)
    # Copy asynchronously and poll the task to avoid XenAPI call timeouts.
    task = session.xenapi.Async.VM.copy(source, name, sr)
    _run_async_task(task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy

    name -- VM name label to provision
    session -- XenAPI session; a new one is opened when None
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.provision(vm)
    _run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm
    .. code-block:: bash
        salt-cloud -a start xenvm01

    name -- VM name label
    call -- salt-cloud dispatch type; must not be 'function'
    session -- XenAPI session; a new one is opened when None
    '''
    if call == 'function':
        # Fixed copy-pasted, misspelled 'show_instnce' error message.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01

    name
        name label of the VM to pause
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01

    name
        name label of the VM to unpause
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01

    name
        name label of the VM to suspend
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01

    name
        name label of the VM to resume
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    Thin alias for :func:`shutdown`.

    .. code-block:: bash

        salt-cloud -a stop xenvm01

    name
        name label of the VM to stop
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01

    name
        name label of the VM to shut down
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed: log previously said "Starting VM" during shutdown.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    Only performs a clean reboot when the VM is currently running;
    otherwise returns a short status string.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01

    name
        name label of the VM to reboot
    call
        salt-cloud invocation type; this is an action and must be
        invoked with ``-a``/``--action``
    session
        optional existing XenAPI session (a new one is opened if None)
    '''
    if call == 'function':
        # Fixed: message previously referred to "show_instnce" (wrong
        # function name and misspelled).
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed: log previously said "Starting VM" during reboot.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        # Run asynchronously to avoid XMLRPC timeouts on slow hosts.
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen

    Returns a dict keyed by host name label with the full host record
    as the value.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.host.get_record(ref)
        for ref in session.xenapi.host.get_all()
    )
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen

    Returns a dict keyed by pool name label with the full pool record
    as the value.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.pool.get_record(ref)
        for ref in session.xenapi.pool.get_all()
    )
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs).

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    Returns a dict keyed by PIF uuid with the full PIF record as the
    value.
    '''
    # NOTE: unlike the sibling list functions, this one rejects any
    # invocation that is not explicitly a function call.
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (
        session.xenapi.PIF.get_record(ref)
        for ref in session.xenapi.PIF.get_all()
    )
    return {rec['uuid']: rec for rec in records}
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    name
        name label of the VM to inspect
    call
        salt-cloud invocation type; this is an action (``-a``)
    kwargs
        unused; kept for salt-cloud action-signature compatibility
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was "rquired".
        return 'A name kwarg is required'
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    return {name: data}
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    name
        name label of the VM to inspect
    call
        salt-cloud invocation type; this is an action (``-a``)

    Returns an empty dict when the VM name does not resolve to exactly
    one VM.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was "rquired".
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                ret['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Images in Xen are templates; delegate to the template listing.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM, skipping
    ISO images.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    name
        name label of the VM whose disks should be destroyed
    session
        optional existing XenAPI session (a new one is opened if None)

    Returns a dict mapping ``vdi-N`` keys to the destroyed VDI labels.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    matched = session.xenapi.VM.get_by_name_label(name)
    if len(matched) == 1:
        # Walk the VM's virtual block devices to find attached VDIs.
        vbds = session.xenapi.VM.get_VBDs(matched[0])
        if vbds is not None:
            index = 0
            for vbd in vbds:
                vdi_ref = session.xenapi.VBD.get_record(vbd)['VDI']
                if vdi_ref == 'OpaqueRef:NULL':
                    # Empty drive (e.g. ejected CD) - nothing to destroy.
                    continue
                vdi_record = session.xenapi.VDI.get_record(vdi_ref)
                # Leave ISO images alone; only destroy real disks.
                if 'iso' not in vdi_record['name_label']:
                    session.xenapi.VDI.destroy(vdi_ref)
                    ret['vdi-{}'.format(index)] = vdi_record['name_label']
                    index += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    name
        ignored; the template name is taken from ``kwargs['name']``
        (salt-cloud passes function arguments via kwargs)
    kwargs
        must contain ``name``, the label of the template to destroy
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    # get_all_records() already returns {ref: record}; iterate it
    # directly instead of re-fetching each record over XMLRPC.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01

    Returns the PV args string, or None when the VM has none set.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # Empty string is normalized to None.
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    Returns True on success, False when ``pv_args`` is missing or the
    XenAPI call fails.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        pv_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', pv_args)
    try:
        session.xenapi.VM.set_PV_args(vm, str(pv_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
host_list
|
python
|
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
|
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1078-L1096
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen

    Xen has no standalone size concept; sizes are baked into templates,
    so this only returns an informational status message.
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    message = 'Sizes are build into templates. Consider running --list-images to see sizes'
    return {'STATUS': message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    return {
        rec['name_label']: rec
        for rec in map(session.xenapi.VM.get_record,
                       session.xenapi.VM.get_all())
        if rec['is_a_template']
    }
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls the guest tools every 5 seconds until a non-link-local IP is
    reported, giving up after roughly 180 seconds.
    '''
    started = datetime.now()
    ip = None
    while ip is None:
        ip = get_vm_ip(name, session)
        if ip is not None and ip.startswith('169'):
            # APIPA/link-local address means no real lease yet.
            ip = None
        elapsed = datetime.now() - started
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            elapsed.seconds, name
        )
        if elapsed.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once per second until it leaves the ``pending``
    state, then destroys the task record. No-op when either argument
    is None.
    '''
    if task is None or session is None:
        return None
    task_api = session.xenapi.task
    label = task_api.get_name_label(task)
    log.debug('Running %s', label)
    while task_api.get_status(task) == 'pending':
        pct = round(task_api.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', pct)
        time.sleep(1)
    log.debug('Cleaning up task %s', label)
    task_api.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`)

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed: log message said "Starting VM" while shutting it down
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed: log message said "Starting VM" while rebooting it
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Look up a XEN VM object reference by its name label.

    Returns the reference only when exactly one VM matches, else None.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Look up a XEN storage repository (SR) object reference by name label.

    Returns the reference only when exactly one SR matches, else None.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Look up a XEN resource pool object reference.

    Matches the first pool whose name label contains ``name``; None if
    no pool matches.
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        if name in record.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Fires destroying/destroyed cloud events, hard-stops the VM if it is
    not already halted, deletes its non-ISO VDIs, destroys the VM record,
    and clears salt-cloud cache entries for the minion.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # announce the teardown on the salt event bus before doing anything
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard, async) unless it is already halted
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the cached minion data so stale entries don't linger
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Geta list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for sr_ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(sr_ref)
        result[record['name_label']] = record
    return result
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        result[record['name_label']] = record
    return result
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    result = {}
    for pif_ref in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif_ref)
        result[record['uuid']] = record
    return result
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in user-facing message: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in user-facing message: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Images are Xen templates; delegate to the template listing.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM and destroy their non-ISO VDIs.

    Walks the VM's VBDs and destroys each attached VDI unless its name
    label contains 'iso' (so mounted install media survive).  Returns a
    dict mapping 'vdi-N' to the destroyed VDI's name label.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD with no VDI attached (e.g. empty CD drive)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # keep ISO images; destroy everything else
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # fixed: the positional ``name`` argument used to be discarded by
    # kwargs.get('name', None); fall back to it when kwargs has no name.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # only templates are eligible; real VMs are destroyed via destroy()
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # normalise empty PV args to None for callers
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        pv_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', pv_args)
    try:
        session.xenapi.VM.set_PV_args(vm, str(pv_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
pool_list
|
python
|
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
|
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1099-L1118
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # fixed: salt.cache is used below but the module never imports it at the
    # top; import it locally so referencing salt.cache cannot NameError/
    # AttributeError depending on what other modules happened to import.
    import salt.cache
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host.

    Logs in with the configured url/user/password.  If the target turns
    out to be a pool slave, XenAPI raises a Failure whose details carry
    the pool master's address; we retry the login against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # details[1] is the pool master address reported by the slave
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # fixed: honor ignore_ssl on the pool-master retry too
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # skip templates and dom0
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # fixed: narrowed bare ``except Exception`` — only a missing
                # dict key is expected here
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    Tries the first IPv4 address reported on a VIF; falls back to the
    guest-metrics network map key "0/ip".  Returns None when neither
    source has an address yet.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses come back in CIDR form; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # guest metrics are unavailable until the tools are up; best-effort
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    Applies a static IPv4 configuration to every VIF on the VM
    (best-effort: XenAPI failures are logged and skipped).

    :param ipv4_cidr: address in CIDR notation, e.g. ``10.0.0.215/24``
    :param ipv4_gw: IPv4 gateway address
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    # addition consideration needed for
    # multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                # e.g. guest tools missing or locking mode unsupported
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    Returns the complete XenAPI VM record for every non-template,
    non-dom0 VM, augmented with the standard salt-cloud keys
    (id/name/image/size/state/private_ips/public_ips), and refreshes
    the salt-cloud node cache.

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a datetime-like value that breaks later
            # serialization of the cache — drop it
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    # provider may be 'name:driver'; the cache wants just the name part
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # fixed key typo: 'OpqueRef' -> 'OpaqueRef', consistent with
            # the non-terse output below
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # For now the resource pools stand in for locations.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Xen has no separate size concept; sizes live in the templates.
    status = ('Sizes are build into templates. '
              'Consider running --list-images to see sizes')
    return {'STATUS': status}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    for vm_ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm_ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # fixed message typo: 'show_instnce' -> 'show_instance'
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # fixed: ret used to be unbound (NameError) when ``name`` matched a
    # template or the control domain
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # fixed: narrowed bare ``except Exception`` to the expected error
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
    __utils__['cloud.cache_node'](
        ret,
        __active_provider_name__,
        __opts__
    )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` when given, otherwise the first
    pool on the host (None when no pools exist).
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        all_pools = session.xenapi.pool.get_all()
        resource_pool = all_pools[0] if all_pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Prefers the profile's ``storage_repo``; otherwise falls back to the
    resource pool's default SR, or None when there is no pool.
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        default_sr = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(default_sr)
        log.debug('storage repository: %s', sr_record['name_label'])
        storage_repo = default_sr
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    Workflow: fire 'creating' event -> pick resource pool and SR ->
    clone (default) or full-copy the template -> provision -> start ->
    wait for an IP -> optionally assign a static IP -> optionally
    bootstrap a Salt minion -> fire 'created' event.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    # clone defaults to True (fast path when source and target share an SR)
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    # full XenAPI record is attached under 'extra' for callers that need it
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Fills in bootstrap connection defaults on ``vm_`` and hands off to
    the generic salt-cloud bootstrap helper.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined

    Reads ``ipv4_cidr``/``ipv4_gw`` from the profile (empty strings when
    absent) and forwards them to set_vm_ip().
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds, discarding link-local (APIPA,
    169.x) addresses, and gives up after roughly 180 seconds.
    '''
    started = datetime.now()
    ip_addr = None
    while ip_addr is None:
        ip_addr = get_vm_ip(name, session)
        if ip_addr is not None and ip_addr.startswith('169'):
            # ignore APIPA address
            ip_addr = None
        elapsed = (datetime.now() - started).seconds
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            elapsed, name
        )
        if elapsed > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Polls the task once per second until it leaves the 'pending' state,
    logging progress, then destroys the task record.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    :param template: object reference of the source template
    :param name: string name of new VM
    :param session: XenAPI session object reference
    :param sr: storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Run the XenAPI provision step on a freshly cloned/copied VM.

    :param name: name label of the VM to provision
    :param session: existing XenAPI session; one is created when omitted
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    task = session.xenapi.Async.VM.provision(_get_vm(name, session))
    _run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.start(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # Async.VM.resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`)

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # fixed: the message used to reference "show_instnce"
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed: log message said "Starting VM" while shutting it down
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01

    :param name: name label of the VM to reboot
    :param call: salt-cloud dispatch type; must not be ``'function'``
    :param session: optional XenAPI session (a new one is opened when None)
    :return: instance details from ``show_instance()``, or an explanatory
        string when the VM is not running
    '''
    if call == 'function':
        # error text previously (mis)named 'show_instnce'; name the real action
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message previously said 'Starting VM' -- fixed to match the action
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        # clean_reboot needs a running guest to cooperate
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
    '''
    Get XEN resource pool object reference

    Returns the first pool whose name label contains ``name``,
    or None when nothing matches.
    '''
    if session is None:
        session = _get_session()
    pools = session.xenapi.pool.get_all()
    for pool in pools:
        pool_record = session.xenapi.pool.get_record(pool)
        # NOTE(review): substring match, not exact equality -- 'pool'
        # would match both 'pool1' and 'mypool'; confirm this looseness
        # is intended before tightening
        if name in pool_record.get('name_label'):
            return pool
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    Fires 'destroying'/'destroyed' events on the salt event bus, hard
    shuts down a running VM, deletes its non-ISO disks, destroys the VM
    and clears salt-cloud cache entries for the minion.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # announce the pending destroy on the salt event bus
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down -- a running VM cannot be destroyed
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's cache bookkeeping
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen

    :param call: salt-cloud dispatch type; must be ``'function'``
    :return: dict keyed by SR name label, each value the full SR record
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for sr in session.xenapi.SR.get_all():
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    # NOTE(review): sibling functions (sr_list, pif_list) guard with
    # ``call != 'function'``; this one only rejects 'action'. Confirm
    # whether the looser check is intentional.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        # keyed by the host's human-readable name label
        ret[host_record['name_label']] = host_record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs)

    .. code-block:: bash

        salt-cloud -f pif_list myxen

    :param call: salt-cloud dispatch type; must be ``'function'``
    :return: dict keyed by PIF uuid, each value the full PIF record
    '''
    # docstring previously described "Resource Pools" / pool_list by mistake
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    for pif in session.xenapi.PIF.get_all():
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01

    :param name: name label of the VM to inspect
    :return: ``{name: {'vif-0': record, ...}}``
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the returned message ('rquired')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # index each record so multiple VIFs remain distinguishable
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01

    :param name: name label of the VM to inspect
    :return: ``{'vbd-0': record, ...}`` or ``{}`` when the VM is not
        uniquely resolvable
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed typo in the returned message ('rquired')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # only an unambiguous single match is usable
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Xen "images" are simply templates; reuse the template listing.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM

    Destroys every non-ISO VDI attached to the named VM and returns a
    mapping of ``vdi-N`` -> destroyed VDI name label.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # a VBD with no attached disk reports the NULL sentinel
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # skip ISO media; only real disks are destroyed
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # NOTE(review): the positional ``name`` argument is always overwritten
    # here (possibly with None when the kwarg is absent) -- confirm intended
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # every template with a matching label is destroyed (no early break)
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    # normalise an empty/falsy result to None
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    Returns True on success, False when ``pv_args`` is missing from
    ``kwargs`` or the XenAPI call fails.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        # str() guards against non-string values arriving from the CLI
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
pif_list
|
python
|
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
|
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1121-L1139
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    # 'url' is the only hard requirement for this driver's provider config
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ('url',)
    )
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    # delegate field filtering to the shared salt-cloud helper, driven
    # by the fields configured under 'query.selection'
    return salt.utils.cloud.list_nodes_select(
        list_nodes_full(),
        __opts__['query.selection'],
        call,
    )
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Sizes have no standalone meaning in Xen -- they live in templates.
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls get_vm_ip() every 5 seconds for roughly 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            # NOTE(review): plain prefix test -- also matches non-APIPA
            # strings starting with '169' (e.g. '169.1.x'); confirm OK
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        # give up after ~3 minutes; caller proceeds without an address
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Blocks (polling once per second) until the task leaves the
    'pending' state, then destroys the task record.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # finished tasks must be destroyed explicitly or they accumulate
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    # resolve the template's object reference before cloning
    source = _get_vm(image, session)
    task = session.xenapi.Async.VM.clone(source, name)
    _run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    template = object reference
    name = string name of new VM
    session = object reference
    sr = object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    # resolve the template's object reference before copying
    source = _get_vm(template, session)
    task = session.xenapi.Async.VM.copy(source, name, sr)
    _run_async_task(task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy

    Finalizes a freshly cloned/copied template into a startable VM.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.provision(vm)
    _run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    .. code-block:: bash

        salt-cloud -d xenvm01

    Fires ``destroying``/``destroyed`` events on the salt event bus,
    hard-stops the VM if running, deletes its non-ISO VDIs, destroys the
    VM record, and removes the minion from salt-cloud's cache.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce on the event bus before touching the VM.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard — destroy should not wait on guest cooperation)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from salt-cloud's cache so it no longer shows
        # up in cached node listings.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    repos = {}
    for sr_ref in session.xenapi.SR.get_all():
        record = session.xenapi.SR.get_record(sr_ref)
        repos[record['name_label']] = record
    return repos


def host_list(call=None):
    '''
    Get a list of Xen Servers.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    servers = {}
    for host_ref in session.xenapi.host.get_all():
        record = session.xenapi.host.get_record(host_ref)
        servers[record['name_label']] = record
    return servers


def pool_list(call=None):
    '''
    Get a list of Resource Pools.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    resource_pools = {}
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        resource_pools[record['name_label']] = record
    return resource_pools
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message ('rquired' -> 'required').
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # enumerate() replaces the manual counter from the original.
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret


def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo in the returned message ('rquired' -> 'required').
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed on an unambiguous single label match.
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            for index, vbd in enumerate(vbds):
                ret['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # In Xen, "images" are simply the templates on the host.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    Walks the VM's VBDs and destroys each attached VDI except ISO
    images (identified by 'iso' in the VDI name label).  Returns a dict
    of destroyed VDI labels keyed ``vdi-<index>``.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # 'OpaqueRef:NULL' marks a VBD with no disk attached
                # (e.g. an empty CD drive).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # Never destroy ISO images; they may be shared.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    The template name is taken from ``kwargs['name']`` (the positional
    ``name`` parameter is overwritten below).  Returns a dict of
    ``{name: {'status': 'destroyed'|'not found'}}``.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # NOTE: intentionally shadows the positional parameter; the name
    # must come from the -f kwargs.
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only templates are eligible; real VMs are never destroyed here.
        if record['is_a_template']:
            if record['name_label'] == name:
                found = True
                # log.debug(record['name_label'])
                session.xenapi.VM.destroy(vm)
                ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        # Fixed message: previously referred to 'show_instnce' (sic).
        raise SaltCloudException(
            'The get_pv_args action must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm)
    if pv_args:
        return pv_args
    return None


def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    Returns ``True`` on success, ``False`` when ``pv_args`` is missing
    or the XenAPI call fails.
    '''
    if call == 'function':
        # Fixed message: previously referred to 'show_instnce' (sic).
        raise SaltCloudException(
            'The set_pv_args action must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
vif_list
|
python
|
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
|
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1142-L1171
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # ``salt.cache`` is not in this module's top-level imports; import it
    # here so ``salt.cache.Cache`` cannot fail with AttributeError when
    # the submodule has not been loaded elsewhere.
    import salt.cache
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module.
    '''
    dependencies = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, dependencies)


def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    required_keys = ('url',)
    return config.is_provider_configured(__opts__, provider_name, required_keys)
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration and logs in with XenAPI protocol 1.0.  If the
    configured host rejects the login (e.g. it is a pool slave), the
    address embedded in the failure details is used to retry against the
    pool master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        # Password deliberately redacted from the debug log.
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # details[1] presumably carries the pool master address for a
        # HOST_IS_SLAVE failure — TODO confirm; other failures would
        # retry against a bogus URL.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines.

    .. code-block:: bash

        salt-cloud -Q

    Returns a dict keyed by VM label with the standard salt-cloud
    summary fields (id, image, name, size, state, ips).
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    # Iterating the records dict yields the opaque VM references.
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0).
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    Tries the VIF-reported IPv4 address first, then falls back to the
    guest-metrics network map.  Returns ``None`` when no address is
    available.
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            # Fetch the address list once per VIF (original queried the
            # API twice: once for the truth test, once for pop()).
            addresses = session.xenapi.VIF.get_ipv4_addresses(vif)
            if addresses:
                cidr = addresses.pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif).

    :param name: label of the target VM
    :param ipv4_cidr: address in CIDR form, e.g. ``10.0.0.215/24``
    :param ipv4_gw: default gateway address
    :param session: optional existing XenAPI session
    :param call: salt-cloud invocation type; ``'function'`` is rejected

    Always returns ``True``; individual VIF failures are only logged.
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        # NOTE(review): despite the TODO above, this loop configures the
        # SAME address on EVERY vif, not just the first one.
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines.

    .. code-block:: bash

        salt-cloud -F

    Returns the complete XenAPI VM record for each guest, augmented
    with the standard salt-cloud fields, and refreshes the salt-cloud
    node cache.
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the control domain (dom0).
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # Second get_record call gives a fresh dict to mutate.
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is a XenAPI datetime that does not serialize
            # cleanly, so drop it from the output.
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # Strip the ':driver' suffix from 'provider:driver' style names.
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances.

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings; only the exact string 'True' enables
    # terse output (replaces the original nested if/else ladder).
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # NOTE(review): 'OpqueRef' is a long-standing typo in this
            # key; kept as-is for backward compatibility with callers.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented).

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest analogue to "locations" for now.
    return pool_list()


def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    status_message = 'Sizes are build into templates. Consider running --list-images to see sizes'
    return {'STATUS': status_message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number cores,
    memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    templates = {}
    for vm_ref in session.xenapi.VM.get_all():
        record = session.xenapi.VM.get_record(vm_ref)
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    Returns the standard salt-cloud node summary and refreshes the
    per-node cache.  NOTE(review): appears to return ``None`` when
    ``name`` resolves to a template or the control domain — confirm
    against callers.
    '''
    if call == 'function':
        raise SaltCloudException(
            'The show_instnce function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        # Keep the salt-cloud cache in sync with what we report.
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool.

    Uses the profile's ``resource_pool`` when given; otherwise falls
    back to the first pool on the host, or ``None`` if there is none.
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
            pool_record = session.xenapi.pool.get_record(resource_pool)
            log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool


def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create.

    Uses the profile's ``storage_repo`` when given; otherwise the
    resource pool's default SR, or ``None``.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen.

    The configuration for this function is read from the profile
    settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    Flow: fire 'creating' event -> clone or copy the template ->
    provision -> start -> wait for an IP -> optionally set a static IP
    -> optionally bootstrap the Salt minion -> fire 'created' event.
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    # Clone (fast, same SR) is the default; copy is used across SRs.
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # NOTE: 'delopy' is a typo in this runtime log string (left as-is).
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    # Attach the raw XenAPI record for callers that want full detail.
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create().

    Mutates ``vm_`` in place with bootstrap connection values, then
    hands off to the generic salt-cloud bootstrap utility.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)


def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined.

    Reads ``ipv4_cidr``/``ipv4_gw`` from the profile (empty string when
    absent) and applies them via :func:`set_vm_ip`.
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    if 'ipv4_gw' in vm_.keys():
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_.keys():
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create().

    Polls the guest-tools-reported address every 5 seconds, ignoring
    APIPA (169.x) self-assigned addresses, and gives up with a warning
    after roughly 180 seconds.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA address
            if status.startswith('169'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)


def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts.

    Blocks until the task leaves the ``pending`` state, logging progress
    once per second, then destroys the task record to free it on the
    server side.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning.

    This is faster and should be used if source and target are
    in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)


def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy.

    This is slower and should be used if source and target are
    NOT in the same storage repository.

    :param template: object reference of the source template
    :param name: string name of new VM
    :param session: XenAPI session object reference
    :param sr: storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    template_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(template_ref, name, sr)
    _run_async_task(copy_task, session)


def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Fixed message: previously referred to 'show_instnce' (sic).
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)


def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)


def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)


def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)


def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)


def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # Stop is implemented as a clean shutdown.
    return shutdown(name, call, session)


def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: previously said 'Starting VM'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)


def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Fixed log message: previously said 'Starting VM'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        # Only a running VM can take a clean (guest-cooperative) reboot.
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    .. code-block:: bash

        salt-cloud -d xenvm01

    Fires ``destroying``/``destroyed`` events, hard-stops the VM if it
    is running, deletes its non-ISO VDIs, destroys the VM record, and
    removes the minion from salt-cloud's cache.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    # Announce on the event bus before touching the VM.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard — destroy should not wait on guest cooperation)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop the minion from salt-cloud's cache so it no longer shows
        # up in cached node listings.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
    return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories, keyed by repository label.

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    repositories = {}
    for repo_ref in session.xenapi.SR.get_all():
        repo_record = session.xenapi.SR.get_record(repo_ref)
        repositories[repo_record['name_label']] = repo_record
    return repositories
def host_list(call=None):
    '''
    Get a list of Xen Servers, keyed by host label.

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    # Reject anything that is not a -f/--function call so the guard
    # matches its own error message and the sr_list/pif_list guards
    # (previously only ``call == 'action'`` was rejected).
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    hosts = session.xenapi.host.get_all()
    for host in hosts:
        host_record = session.xenapi.host.get_record(host)
        ret[host_record['name_label']] = host_record
    return ret
def pool_list(call=None):
    '''
    Get a list of Resource Pools, keyed by pool label.

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    # Reject anything that is not a -f/--function call so the guard
    # matches its own error message and the sr_list/pif_list guards
    # (previously only ``call == 'action'`` was rejected).
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pools = session.xenapi.pool.get_all()
    for pool in pools:
        pool_record = session.xenapi.pool.get_record(pool)
        ret[pool_record['name_label']] = pool_record
    return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID.

    (The previous docstring was copy-pasted from ``pool_list``.)

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # message typo fixed (was 'rquired')
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            # enumerate() replaces the manual counter
            for index, vbd in enumerate(vbds):
                vbd_record = session.xenapi.VBD.get_record(vbd)
                data['vbd-{}'.format(index)] = vbd_record
        ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with `--list-images` this returns every image together
    with its full details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Templates double as "images" on XenServer, so delegate.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the virtual disk images (VDIs) attached to a VM.

    ISO VDIs (any VDI whose label contains 'iso') are left untouched.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01

    :param name: label of the VM whose disks are destroyed
    :param session: optional existing XenAPI session
    :returns: dict mapping 'vdi-N' to the destroyed VDI's label
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # skip VBDs with no backing VDI (e.g. empty CD drives)
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # never destroy ISO images, only real disks
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM templates matching ``name``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :param kwargs: must contain ``name``, the template label to destroy
    :returns: ``{name: {'status': 'destroyed'}}`` or
        ``{name: {'status': 'not found'}}``
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    name = kwargs.get('name', None)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already returns {ref: record}; iterate the pairs
    # instead of re-fetching every record with a second API call.
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get the paravirtualization (PV) boot arguments of a VM.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # empty PV args are reported as None rather than ''
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set the paravirtualization (PV) boot arguments of a VM.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    try:
        pv_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', pv_args)
    try:
        session.xenapi.VM.set_PV_args(vm_ref, str(pv_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
vbd_list
|
python
|
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
|
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1174-L1205
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time

# Import salt libs
import salt.cache
import salt.config as config
from salt.ext import six

# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
    SaltCloudSystemExit,
    SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.

    Returns the driver name on success, ``False`` when either the
    provider configuration or the XenAPI SDK is missing.  Also
    initializes the module-level ``cache`` object.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # NOTE(review): relies on ``salt.cache`` being importable here —
    # confirm it is imported at module level.
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks that the XenAPI.py SDK module was importable.
    '''
    dependencies = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, dependencies)
def get_configured_provider():
    '''
    Return the first configured instance of this driver.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    return config.is_provider_configured(__opts__, provider_name, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host.

    The API URL, user, password and ``ignore_ssl`` flag are read from
    the provider configuration.  If the login fails because the
    configured host is a pool slave, the pool master's address is taken
    from the failure details and the login is retried against it.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # HOST_IS_SLAVE failures carry the master's address in
        # details[1]; rebuild the URL against the master and retry.
        # NOTE(review): assumes any XenAPI.Failure here is HOST_IS_SLAVE
        # — other failures would be retried incorrectly; confirm.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q

    :returns: dict keyed by VM label with id/image/name/size/state/ips
        (templates and the control domain are excluded)
    '''
    session = _get_session()
    # get_all_records() already returns {ref: record}; use the records
    # directly instead of one extra get_record() round trip per VM.
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for record in vms.values():
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # narrowed from a bare ``except Exception``; only a
                # missing key is expected here
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            ret[record['name_label']] = {
                'id': record['uuid'],
                'image': base_template_name,
                'name': record['name_label'],
                'size': record['memory_dynamic_max'],
                'state': record['power_state'],
                'private_ips': get_vm_ip(record['name_label'], session),
                'public_ips': None,
            }
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :returns: the first IPv4 address reported by a VIF, otherwise the
        '0/ip' entry from the guest metrics, otherwise ``None``
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses are CIDR strings; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # guest tools may not be running yet; treat as "no IP yet"
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param ipv4_cidr: address in CIDR form, e.g. ``10.0.0.215/24``
    :param ipv4_gw: gateway address
    :returns: always ``True``, even when configuration failed
        (NOTE(review): failures are only logged — confirm this is
        intentional)
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    #       addition consideration needed for
    #       multiple interface(vif) VMs
    # NOTE(review): despite the TODO, the loop below configures EVERY
    # vif with the same address — confirm intended behavior.
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F

    :returns: dict keyed by VM label with the full VM record plus the
        salt-cloud standard fields (id/name/image/size/state/ips)
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except KeyError:
                # narrowed from a bare ``except Exception``
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # the original fetched the same record a second time with
            # another get_record() call; reuse a copy of the one we have
            vm_cfg = record.copy()
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            if 'snapshot_time' in vm_cfg:
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    nodes = list_nodes_full()
    selection = __opts__['query.selection']
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # CLI kwargs arrive as strings, so compare against 'True'
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # key fixed from the misspelled 'OpqueRef' so both branches
            # expose the same 'OpaqueRef' key
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # resource pools are the closest analogue for now
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Sizes have no standalone meaning on Xen; they are baked into templates.
    message = ('Sizes are build into templates. '
               'Consider running --list-images to see sizes')
    return {'STATUS': message}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    templates = {}
    session = _get_session()
    # get_all_records() fetches every record in a single round trip
    # instead of one get_record() call per VM
    for record in session.xenapi.VM.get_all_records().values():
        if record['is_a_template']:
            templates[record['name_label']] = record
    return templates
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        raise SaltCloudException(
            # message typo fixed (was 'show_instnce')
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # narrowed from a bare ``except Exception``
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool

    Uses the profile's ``resource_pool`` setting when present,
    otherwise falls back to the first pool known to the host (or
    ``None`` when there is no pool at all).
    '''
    resource_pool = ''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        pool = session.xenapi.pool.get_all()
        if not pool:
            resource_pool = None
        else:
            # default to the first (usually only) pool
            first_pool = session.xenapi.pool.get_all()[0]
            resource_pool = first_pool
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create

    Uses the profile's ``storage_repo`` setting when present, otherwise
    the resource pool's default SR, otherwise ``None``.
    '''
    storage_repo = ''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    else:
        storage_repo = None
        if resource_pool:
            # fall back to the pool's default storage repository
            default_sr = session.xenapi.pool.get_default_SR(resource_pool)
            sr_record = session.xenapi.SR.get_record(default_sr)
            log.debug('storage repository: %s', sr_record['name_label'])
            storage_repo = default_sr
        else:
            storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01

    :param vm_: profile/provider configuration dict; notable keys are
        ``name``, ``image``, ``clone``, ``deploy``, ``resource_pool``,
        ``storage_repo``, ``ipv4_cidr`` and ``ipv4_gw``
    :returns: the show_instance() dict for the new VM plus an 'extra'
        key carrying the raw VM record
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (fast, same SR) or copying (cross-SR)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()

    Fills in the bootstrap connection values (host/user/password) from
    the profile, then hands off to the generic cloud bootstrap helper.
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined in the profile
    (``ipv4_cidr`` / ``ipv4_gw`` keys).
    '''
    ipv4_cidr = ''
    ipv4_gw = ''
    # idiomatic membership tests (``key in dict``) instead of ``.keys()``
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create().

    Polls get_vm_ip() every 5 seconds for up to ~180 seconds, ignoring
    link-local (APIPA) addresses.
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # ignore APIPA addresses; the original matched anything
            # starting with '169', which also rejected valid addresses
            # elsewhere in 169.0.0.0/8 outside 169.254.0.0/16
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM by cloning.

    Cloning is faster than copying and should be used when source and
    target live in the same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create a VM by copying.

    Copying is slower than cloning and should be used when source and
    target are NOT in the same storage repository.

    :param template: source VM/template object reference
    :param name: label for the new VM
    :param session: XenAPI session object reference
    :param sr: destination storage repository object reference
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision a VM right after it has been cloned/copied.
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    provision_task = session.xenapi.Async.VM.provision(vm_ref)
    _run_async_task(provision_task, session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`)

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message fixed: it previously said 'Starting VM'
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    Only a running VM can be rebooted; otherwise a message string is
    returned instead of the instance details.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            # message fixed: it previously referenced 'show_instnce'
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message fixed: it previously said 'Starting VM'
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs).

    Returns a dict mapping each PIF's ``uuid`` to its full record.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    pifs = session.xenapi.PIF.get_all()
    for pif in pifs:
        record = session.xenapi.PIF.get_record(pif)
        ret[record['uuid']] = record
    return ret
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces (VIFs) on a VM.

    **requires**: the name of the vm with the vbd definition

    Returns ``{name: {'vif-0': record, 'vif-1': record, ...}}``, or an
    error string when no name was supplied.

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        # Index each VIF record so multiple interfaces stay distinguishable.
        for index, vif in enumerate(vifs):
            data['vif-{}'.format(index)] = session.xenapi.VIF.get_record(vif)
    ret[name] = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen.

    If called with `--list-images` this returns every template with
    full details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    # Images in Xen are templates; delegate to template_list().
    if call != 'action':
        return template_list()
    raise SaltCloudSystemExit(
        'This function must be called with -f, --function argument.'
    )
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Destroy the non-ISO virtual disk images (VDIs) attached to a VM.

    Walks the VM's virtual block devices (VBDs) and destroys each backing
    VDI whose label does not contain ``iso``.  Returns a dict mapping
    ``vdi-<index>`` to the destroyed VDI's ``name_label``.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only act when the label resolves to exactly one VM.
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # Skip empty drives (e.g. an empty CD drive has a NULL VDI).
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # ISO images are shared media; never destroy them.
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template instance.

    Every template whose label matches *name* is destroyed.  Returns
    ``{name: {'status': 'destroyed'}}`` or ``{'status': 'not found'}``.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Prefer the kwargs value but fall back to the positional argument
    # instead of silently discarding it (previously name was clobbered
    # with None when kwargs had no 'name' key).
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Only templates are eligible; regular VMs go through destroy().
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Return the paravirtualization (PV) arguments configured on a VM,
    or ``None`` when none are set.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    return pv_args if pv_args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM.

    Expects ``kwargs['pv_args']``; returns ``True`` on success and
    ``False`` when the parameter is missing or the XenAPI call fails.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        log.debug('Setting PV Args: %s', kwargs['pv_args'])
        session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
    except KeyError:
        # Caller did not provide pv_args at all.
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        # XenAPI rejected the assignment; best-effort, report failure.
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
destroy_vm_vdis
|
python
|
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
|
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1227-L1256
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.cache
import salt.config as config
from salt.ext import six

# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
    SaltCloudSystemExit,
    SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    if get_configured_provider() is False:
        return False
    if _get_dependencies() is False:
        return False
    # Initialize the module-level cache used by the driver.
    # NOTE(review): salt.cache is not imported at the top of this file;
    # it appears to rely on a transitive import — confirm.
    global cache  # pylint: disable=global-statement,invalid-name
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks that the XenAPI.py module could be imported.
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration and logs in.  If the configured host is a
    pool slave, XenAPI raises a Failure whose second detail element is
    the pool master's address; in that case the login is retried
    against the master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        # Password is deliberately redacted from the debug log.
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # Host redirected us: details[1] holds the pool master address.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        # Keep the scheme ('http:' + '') and swap in the master host.
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    Tries the VIF-reported IPv4 address first, then falls back to the
    guest-tools metrics.  Returns ``None`` when no address is known.

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # Addresses are CIDR strings; keep the host part only.
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')  # subnet is intentionally unused
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        # '0/ip' is the first interface's address as reported by guest tools.
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        # Metrics are unavailable until guest tools report in; best-effort.
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set a static IPv4 address on the VM's virtual interfaces (vifs).

    Returns ``True`` when every assignment succeeded, ``False`` when any
    XenAPI call failed (previously ``True`` was returned unconditionally).
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # TODO: currently every VIF receives the same address, which is only
    # sensible for single-interface VMs; multi-VIF support is pending.
    vifs = session.xenapi.VM.get_VIFs(vm)
    success = True
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
                success = False
    return success
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # ``terse`` arrives from the CLI as the string 'True', not a boolean.
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # Key spelling fixed: was 'OpqueRef', inconsistent with the
            # full-detail branch below.
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions.

    Sizes are not a separate concept in Xen; they are part of the
    templates, so a pointer to ``--list-images`` is returned instead.

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Message typo fixed: 'build' -> 'built'.
    return {'STATUS':
            'Sizes are built into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of each template to show number of cores,
    memory sizes, etc.

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    all_records = (session.xenapi.VM.get_record(ref)
                   for ref in session.xenapi.VM.get_all())
    return {rec['name_label']: rec
            for rec in all_records
            if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # Message typo fixed: was 'show_instnce'.
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        # 'base_template_name' only exists on VMs built from a template,
        # so a missing key is expected; narrowed from bare Exception.
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts.

    Polls the task once per second until it leaves the ``pending``
    state, then destroys the task object on the server.
    '''
    if task is None or session is None:
        return None
    task_name = session.xenapi.task.get_name_label(task)
    log.debug('Running %s', task_name)
    while session.xenapi.task.get_status(task) == 'pending':
        # Progress is reported as a 0..1 float; log it as a percentage.
        progress = round(session.xenapi.task.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', progress)
        time.sleep(1)
    # Finished tasks must be destroyed explicitly or they accumulate
    # on the XenServer host.
    log.debug('Cleaning up task %s', task_name)
    session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create a VM named *name* by cloning *image*.

    Cloning is fast but requires source and target to live in the
    same storage repository.
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    source_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(source_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
    '''
    Start a vm.

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # Run asynchronously so long power operations don't time out.
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm.

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm.

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk.

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk.

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm.

    This is an alias for a clean :func:`shutdown`.

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm.

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fixed: previously said 'Starting VM'.
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.

    Only running VMs can be cleanly rebooted; otherwise an explanatory
    string is returned.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # Copy-paste error fixed: message referred to 'show_instnce'.
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # Log message fixed: previously said 'Starting VM'.
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance.

    Fires the salt-cloud destroying/destroyed events, hard-shuts-down a
    running VM, destroys its non-ISO disks via destroy_vm_vdis(), then
    destroys the VM itself and cleans the minion cache.  Returns a dict
    with 'vbd' and 'destroyed' keys, or ``None`` when no VM matched.

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard: the VM is about to be destroyed anyway)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # Drop any cached minion data so list/query output stays accurate.
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of virtual block devices (VBDs) on a VM.

    **requires**: the name of the VM with the VBD definitions

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    # Per-instance action: must be invoked with -a/--action.
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # Fixed typo: was 'rquired'.
        return 'A name kwarg is required'
    ret = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    # Only proceed on an unambiguous match; otherwise return an empty dict.
    if len(vms) == 1:
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            data = {}
            # enumerate replaces the previous manually-incremented counter.
            for index, vbd in enumerate(vbds):
                data['vbd-{}'.format(index)] = session.xenapi.VBD.get_record(vbd)
            ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    # Provider-level function; reject per-instance (-a) invocation.
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Images in Xen are templates; delegate to the template listing.
    templates = template_list()
    return templates
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # Prefer a name supplied via kwargs, but fall back to the positional
    # argument rather than silently overwriting it with None.
    name = kwargs.get('name', name)
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    # get_all_records() already returns {ref: record}; iterating the pairs
    # avoids an extra per-VM get_record() round-trip to the server.
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    # Per-instance action: must be invoked with -a/--action.
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    args = session.xenapi.VM.get_PV_args(vm)
    # Normalize an empty result to None for callers.
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
    '''
    # Per-instance action: must be invoked with -a/--action.
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        new_args = kwargs['pv_args']
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    log.debug('Setting PV Args: %s', new_args)
    try:
        session.xenapi.VM.set_PV_args(vm, str(new_args))
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
destroy_template
|
python
|
def destroy_template(name=None, call=None, kwargs=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
'''
if call == 'action':
raise SaltCloudSystemExit(
'The destroy_template function must be called with -f.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
found = False
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
if record['name_label'] == name:
found = True
# log.debug(record['name_label'])
session.xenapi.VM.destroy(vm)
ret[name] = {'status': 'destroyed'}
if not found:
ret[name] = {'status': 'not found'}
return ret
|
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -f destroy_template myxen name=testvm2
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1259-L1289
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
'''
Get a connection to the XenServer host
'''
api_version = '1.0'
originator = 'salt_cloud_{}_driver'.format(__virtualname__)
url = config.get_cloud_config_value(
'url',
get_configured_provider(),
__opts__,
search_global=False
)
user = config.get_cloud_config_value(
'user',
get_configured_provider(),
__opts__,
search_global=False
)
password = config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
ignore_ssl = config.get_cloud_config_value(
'ignore_ssl',
get_configured_provider(),
__opts__,
default=False,
search_global=False
)
try:
session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
log.debug(
'url: %s user: %s password: %s, originator: %s',
url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
except XenAPI.Failure as ex:
pool_master_addr = six.text_type(ex.__dict__['details'][1])
slash_parts = url.split('/')
new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
session = XenAPI.Session(new_url)
log.debug(
'session is -> url: %s user: %s password: %s, originator:%s',
new_url, user, 'XXX-pw-redacted-XXX', originator
)
session.xenapi.login_with_password(
user, password, api_version, originator)
return session
def list_nodes():
'''
List virtual machines
.. code-block:: bash
salt-cloud -Q
'''
session = _get_session()
vms = session.xenapi.VM.get_all_records()
ret = {}
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret[record['name_label']] = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(record['name_label'], session),
'public_ips': None}
return ret
def get_vm_ip(name=None, session=None, call=None):
'''
Get the IP address of the VM
.. code-block:: bash
salt-cloud -a get_vm_ip xenvm01
.. note:: Requires xen guest tools to be installed in VM
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
ret = None
# -- try to get ip from vif
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
for vif in vifs:
if session.xenapi.VIF.get_ipv4_addresses(vif):
cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
ret, subnet = cidr.split('/')
log.debug(
'VM vif returned for instance: %s ip: %s', name, ret)
return ret
# -- try to get ip from get tools metrics
vgm = session.xenapi.VM.get_guest_metrics(vm)
try:
net = session.xenapi.VM_guest_metrics.get_networks(vgm)
if "0/ip" in net.keys():
log.debug(
'VM guest metrics returned for instance: %s 0/ip: %s',
name, net["0/ip"]
)
ret = net["0/ip"]
# except Exception as ex:
except XenAPI.Failure:
log.info('Could not get vm metrics at this time')
return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
'''
Return available Xen VDI images
If this function is called with the ``-f`` or ``--function`` then
it can return a list with minimal deatil using the ``terse=True`` keyword
argument.
.. code-block:: bash
salt-cloud -f vdi_list myxen terse=True
'''
if call == 'action':
raise SaltCloudException(
'This function must be called with -f or --function.')
log.debug('kwargs is %s', kwargs)
if kwargs is not None:
if 'terse' in kwargs:
if kwargs['terse'] == 'True':
terse = True
else:
terse = False
else:
terse = False
else:
kwargs = {}
terse = False
session = _get_session()
vdis = session.xenapi.VDI.get_all()
ret = {}
for vdi in vdis:
data = session.xenapi.VDI.get_record(vdi)
log.debug(type(terse))
if terse is True:
ret[data.get('name_label')] = {
'uuid': data.get('uuid'),
'OpqueRef': vdi}
else:
data.update({'OpaqueRef': vdi})
ret[data.get('name_label')] = data
return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
'''
Return available Xen template information.
This returns the details of
each template to show number cores, memory sizes, etc..
.. code-block:: bash
salt-cloud -f template_list myxen
'''
templates = {}
session = _get_session()
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if record['is_a_template']:
templates[record['name_label']] = record
return templates
def show_instance(name, session=None, call=None):
'''
Show information about a specific VM or template
.. code-block:: bash
salt-cloud -a show_instance xenvm01
.. note:: memory is memory_dynamic_max
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
log.debug('show_instance-> name: %s session: %s', name, session)
if session is None:
session = _get_session()
vm = _get_vm(name, session=session)
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
ret = {'id': record['uuid'],
'image': base_template_name,
'name': record['name_label'],
'size': record['memory_dynamic_max'],
'state': record['power_state'],
'private_ips': get_vm_ip(name, session),
'public_ips': None}
__utils__['cloud.cache_node'](
ret,
__active_provider_name__,
__opts__
)
return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
'''
Stop a vm
.. code-block:: bash
salt-cloud -a stop xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
'''
Reboot a vm
.. code-block:: bash
salt-cloud -a reboot xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
power_state = session.xenapi.VM.get_power_state(vm)
if power_state == 'Running':
task = session.xenapi.Async.VM.clean_reboot(vm)
_run_async_task(task, session)
return show_instance(name)
else:
return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
'''
Geta list of storage repositories
.. code-block:: bash
salt-cloud -f sr_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
srs = session.xenapi.SR.get_all()
for sr in srs:
sr_record = session.xenapi.SR.get_record(sr)
ret[sr_record['name_label']] = sr_record
return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def get_pv_args(name, session=None, call=None):
    '''
    Return the paravirtualization (PV) boot arguments of a VM, or
    ``None`` when the VM has none set.

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    args = session.xenapi.VM.get_PV_args(vm)
    # normalize an empty PV-args string to None
    return args if args else None
def set_pv_args(name, kwargs=None, session=None, call=None):
    '''
    Set the paravirtualization (PV) boot arguments of a VM.

    .. code-block:: bash

        salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"

    :returns: ``True`` on success, ``False`` when ``pv_args`` is missing
        or the XenAPI call fails
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    try:
        pv_args = kwargs['pv_args']
        log.debug('Setting PV Args: %s', pv_args)
        session.xenapi.VM.set_PV_args(vm, str(pv_args))
    except KeyError:
        log.error('No pv_args parameter found.')
        return False
    except XenAPI.Failure:
        log.info('Setting PV Args failed.')
        return False
    return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
get_pv_args
|
python
|
def get_pv_args(name, session=None, call=None):
'''
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
pv_args = session.xenapi.VM.get_PV_args(vm)
if pv_args:
return pv_args
return None
|
Get PV arguments for a VM
.. code-block:: bash
salt-cloud -a get_pv_args xenvm01
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1292-L1312
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
'''
Only load if Xen configuration and XEN SDK is found.
'''
if get_configured_provider() is False:
return False
if _get_dependencies() is False:
return False
global cache # pylint: disable=global-statement,invalid-name
cache = salt.cache.Cache(__opts__)
return __virtualname__
def _get_dependencies():
'''
Warn if dependencies aren't met.
Checks for the XenAPI.py module
'''
return config.check_driver_dependencies(
__virtualname__,
{'XenAPI': HAS_XEN_API}
)
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('url',)
)
def _get_session():
    '''
    Get a connection to the XenServer host.

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    configured provider.  If the initial login raises
    ``XenAPI.Failure`` (e.g. the configured host is a pool slave), the
    address found in the failure details is used to re-establish the
    session against the pool master.
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        # password is deliberately redacted from the debug log
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # NOTE(review): assumes details[1] always carries the pool
        # master's address (as for HOST_IS_SLAVE failures) — confirm
        # for other Failure kinds.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        # keep the scheme ('http:' + '') and swap in the master address
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        session = XenAPI.Session(new_url)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines.

    .. code-block:: bash

        salt-cloud -Q

    :returns: dict keyed by VM name label with id/image/name/size/
        state/private_ips/public_ips fields; templates and the control
        domain (dom0) are excluded
    '''
    session = _get_session()
    # get_all_records() already returns {ref: record}; use the records
    # directly instead of issuing one extra get_record call per VM
    vm_records = session.xenapi.VM.get_all_records()
    ret = {}
    for record in vm_records.values():
        if record['is_a_template'] or record['is_control_domain']:
            continue
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # not every VM carries this attribute; fall back to None
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret[record['name_label']] = {
            'id': record['uuid'],
            'image': base_template_name,
            'name': record['name_label'],
            'size': record['memory_dynamic_max'],
            'state': record['power_state'],
            'private_ips': get_vm_ip(record['name_label'], session),
            'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM.

    Tries the VIF-reported IPv4 address first, then falls back to the
    guest-tools metrics (``0/ip``).

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM

    :param name: name label of the VM to query
    :param session: optional existing XenAPI session; a new one is
        created when ``None``
    :returns: the IPv4 address as a string, or ``None`` if no address
        could be determined
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses are CIDR strings; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from guest tools metrics
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    except XenAPI.Failure:
        # best-effort: metrics may not be available yet (e.g. booting)
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
ipv4_cidr=None,
ipv4_gw=None,
session=None,
call=None):
'''
Set the IP address on a virtual interface (vif)
'''
mode = 'static'
# TODO: Need to add support for IPv6
if call == 'function':
raise SaltCloudException(
'The function must be called with -a or --action.')
log.debug(
'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
name, ipv4_cidr, ipv4_gw, mode
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session)
# -- try to get ip from vif
# TODO: for now will take first interface
# addition consideration needed for
# multiple interface(vif) VMs
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
log.debug('There are %s vifs.', len(vifs))
for vif in vifs:
record = session.xenapi.VIF.get_record(vif)
log.debug(record)
try:
session.xenapi.VIF.configure_ipv4(
vif, mode, ipv4_cidr, ipv4_gw)
except XenAPI.Failure:
log.info('Static IP assignment could not be performed.')
return True
def list_nodes_full(session=None):
'''
List full virtual machines
.. code-block:: bash
salt-cloud -F
'''
if session is None:
session = _get_session()
ret = {}
vms = session.xenapi.VM.get_all()
for vm in vms:
record = session.xenapi.VM.get_record(vm)
if not record['is_a_template'] and not record['is_control_domain']:
# deal with cases where the VM doesn't have 'base_template_name' attribute
try:
base_template_name = record['other_config']['base_template_name']
except Exception:
base_template_name = None
log.debug(
'VM %s, doesnt have base_template_name attribute',
record['name_label']
)
vm_cfg = session.xenapi.VM.get_record(vm)
vm_cfg['id'] = record['uuid']
vm_cfg['name'] = record['name_label']
vm_cfg['image'] = base_template_name
vm_cfg['size'] = None
vm_cfg['state'] = record['power_state']
vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
vm_cfg['public_ips'] = None
if 'snapshot_time' in vm_cfg.keys():
del vm_cfg['snapshot_time']
ret[record['name_label']] = vm_cfg
provider = __active_provider_name__ or 'xen'
if ':' in provider:
comps = provider.split(':')
provider = comps[0]
log.debug('ret: %s', ret)
log.debug('provider: %s', provider)
log.debug('__opts__: %s', __opts__)
__utils__['cloud.cache_node_list'](ret, provider, __opts__)
return ret
def list_nodes_select(call=None):
'''
Perform a select query on Xen VM instances
.. code-block:: bash
salt-cloud -S
'''
return salt.utils.cloud.list_nodes_select(
list_nodes_full(),
__opts__['query.selection'],
call,
)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images.

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True``
    keyword argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True

    :returns: dict keyed by VDI name label; terse mode returns only
        ``uuid`` and ``OpaqueRef`` per entry
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # string comparison because salt-cloud CLI kwargs arrive as strings
    terse = kwargs.get('terse') == 'True'
    session = _get_session()
    vdis = session.xenapi.VDI.get_all()
    ret = {}
    for vdi in vdis:
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            # key was previously misspelled 'OpqueRef'; fixed for
            # consistency with the non-terse branch below
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                'OpaqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
'''
Return available Xen locations (not implemented)
.. code-block:: bash
salt-cloud --list-locations myxen
'''
# TODO: need to figure out a good meaning of locations in Xen
if call == 'action':
raise SaltCloudException(
'The avail_locations function must be called with -f or --function.'
)
return pool_list()
def avail_sizes(session=None, call=None):
'''
Return a list of Xen template definitions
.. code-block:: bash
salt-cloud --list-sizes myxen
'''
if call == 'action':
raise SaltCloudException(
'The avail_sizes function must be called with -f or --function.')
return {'STATUS':
'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    Each entry maps a template's name label to its full VM record
    (number of cores, memory sizes, etc.).

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    records = (session.xenapi.VM.get_record(vm)
               for vm in session.xenapi.VM.get_all())
    return {rec['name_label']: rec
            for rec in records
            if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template.

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max

    :param name: name label of the VM
    :param session: optional existing XenAPI session; a new one is
        created when ``None``
    :returns: dict with id/image/name/size/state/private_ips/public_ips
    '''
    if call == 'function':
        raise SaltCloudException(
            # fixed copy-paste typo: message previously read 'show_instnce'
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except KeyError:
            # not every VM carries this attribute; fall back to None
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        # keep the salt-cloud node cache in sync
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
        return ret
def _determine_resource_pool(session, vm_):
'''
Called by create() used to determine resource pool
'''
resource_pool = ''
if 'resource_pool' in vm_.keys():
resource_pool = _get_pool(vm_['resource_pool'], session)
else:
pool = session.xenapi.pool.get_all()
if not pool:
resource_pool = None
else:
first_pool = session.xenapi.pool.get_all()[0]
resource_pool = first_pool
pool_record = session.xenapi.pool.get_record(resource_pool)
log.debug('resource pool: %s', pool_record['name_label'])
return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
'''
Called by create() used to determine storage repo for create
'''
storage_repo = ''
if 'storage_repo' in vm_.keys():
storage_repo = _get_sr(vm_['storage_repo'], session)
else:
storage_repo = None
if resource_pool:
default_sr = session.xenapi.pool.get_default_SR(resource_pool)
sr_record = session.xenapi.SR.get_record(default_sr)
log.debug('storage repository: %s', sr_record['name_label'])
storage_repo = default_sr
else:
storage_repo = None
log.debug('storage repository: %s', storage_repo)
return storage_repo
def create(vm_):
'''
Create a VM in Xen
The configuration for this function is read from the profile settings.
.. code-block:: bash
salt-cloud -p some_profile xenvm01
'''
name = vm_['name']
record = {}
ret = {}
# fire creating event
__utils__['cloud.fire_event'](
'event',
'starting create',
'salt/cloud/{0}/creating'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('Adding %s to cloud cache.', name)
__utils__['cloud.cachedir_index_add'](
vm_['name'], vm_['profile'], 'xen', vm_['driver']
)
# connect to xen
session = _get_session()
# determine resource pool
resource_pool = _determine_resource_pool(session, vm_)
# determine storage repo
storage_repo = _determine_storage_repo(session, resource_pool, vm_)
# build VM
image = vm_.get('image')
clone = vm_.get('clone')
if clone is None:
clone = True
log.debug('Clone: %s ', clone)
# fire event to read new vm properties (requesting)
__utils__['cloud.fire_event'](
'event',
'requesting instance',
'salt/cloud/{0}/requesting'.format(name),
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
# create by cloning template
if clone:
_clone_vm(image, name, session)
else:
_copy_vm(image, name, session, storage_repo)
# provision template to vm
_provision_vm(name, session)
vm = _get_vm(name, session)
# start vm
start(name, None, session)
# get new VM
vm = _get_vm(name, session)
# wait for vm to report IP via guest tools
_wait_for_ip(name, session)
# set static IP if configured
_set_static_ip(name, session, vm_)
# if not deploying salt then exit
deploy = vm_.get('deploy', True)
log.debug('delopy is set to %s', deploy)
if deploy:
record = session.xenapi.VM.get_record(vm)
if record is not None:
_deploy_salt_minion(name, session, vm_)
else:
log.debug(
'The Salt minion will not be installed, deploy: %s',
vm_['deploy']
)
record = session.xenapi.VM.get_record(vm)
ret = show_instance(name)
ret.update({'extra': record})
__utils__['cloud.fire_event'](
'event',
'created instance',
'salt/cloud/{0}/created'.format(name),
args={
'name': name,
'profile': vm_['profile'],
'provider': vm_['driver'],
},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return ret
def _deploy_salt_minion(name, session, vm_):
'''
Deploy salt minion during create()
'''
# Get bootstrap values
vm_['ssh_host'] = get_vm_ip(name, session)
vm_['user'] = vm_.get('user', 'root')
vm_['password'] = vm_.get('password', 'p@ssw0rd!')
vm_['provider'] = vm_.get('provider', 'xen')
log.debug('%s has IP of %s', name, vm_['ssh_host'])
# Bootstrap Salt minion!
if vm_['ssh_host'] is not None:
log.info('Installing Salt minion on %s', name)
boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
'''
Set static IP during create() if defined
'''
ipv4_cidr = ''
ipv4_gw = ''
if 'ipv4_gw' in vm_.keys():
log.debug('ipv4_gw is found in keys')
ipv4_gw = vm_['ipv4_gw']
if 'ipv4_cidr' in vm_.keys():
log.debug('ipv4_cidr is found in keys')
ipv4_cidr = vm_['ipv4_cidr']
log.debug('attempting to set IP in instance')
set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
'''
Wait for IP to be available during create()
'''
start_time = datetime.now()
status = None
while status is None:
status = get_vm_ip(name, session)
if status is not None:
# ignore APIPA address
if status.startswith('169'):
status = None
check_time = datetime.now()
delta = check_time - start_time
log.debug(
'Waited %s seconds for %s to report ip address...',
delta.seconds, name
)
if delta.seconds > 180:
log.warning('Timeout getting IP address')
break
time.sleep(5)
def _run_async_task(task=None, session=None):
'''
Run XenAPI task in asynchronous mode to prevent timeouts
'''
if task is None or session is None:
return None
task_name = session.xenapi.task.get_name_label(task)
log.debug('Running %s', task_name)
while session.xenapi.task.get_status(task) == 'pending':
progress = round(session.xenapi.task.get_progress(task), 2) * 100
log.debug('Task progress %.2f%%', progress)
time.sleep(1)
log.debug('Cleaning up task %s', task_name)
session.xenapi.task.destroy(task)
def _clone_vm(image=None, name=None, session=None):
'''
Create VM by cloning
This is faster and should be used if source and target are
in the same storage repository
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by cloning %s', name, image)
source = _get_vm(image, session)
task = session.xenapi.Async.VM.clone(source, name)
_run_async_task(task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
'''
Create VM by copy
This is slower and should be used if source and target are
NOT in the same storage repository
template = object reference
name = string name of new VM
session = object reference
sr = object reference
'''
if session is None:
session = _get_session()
log.debug('Creating VM %s by copying %s', name, template)
source = _get_vm(template, session)
task = session.xenapi.Async.VM.copy(source, name, sr)
_run_async_task(task, session)
def _provision_vm(name=None, session=None):
'''
Provision vm right after clone/copy
'''
if session is None:
session = _get_session()
log.info('Provisioning VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.provision(vm)
_run_async_task(task, session)
def start(name, call=None, session=None):
'''
Start a vm
.. code-block:: bash
salt-cloud -a start xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.start(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def pause(name, call=None, session=None):
'''
Pause a vm
.. code-block:: bash
salt-cloud -a pause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Pausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.pause(vm)
_run_async_task(task, session)
return show_instance(name)
def unpause(name, call=None, session=None):
'''
UnPause a vm
.. code-block:: bash
salt-cloud -a unpause xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Unpausing VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.unpause(vm)
_run_async_task(task, session)
return show_instance(name)
def suspend(name, call=None, session=None):
'''
Suspend a vm to disk
.. code-block:: bash
salt-cloud -a suspend xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Suspending VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.suspend(vm)
_run_async_task(task, session)
return show_instance(name)
def resume(name, call=None, session=None):
'''
Resume a vm from disk
.. code-block:: bash
salt-cloud -a resume xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Resuming VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.resume(vm, False, True)
_run_async_task(task, session)
return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm (alias for :func:`shutdown`).

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # message previously a copy-paste of show_instance's
        raise SaltCloudException(
            'The stop function must be called with -a or --action.'
        )
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
'''
Shutdown a vm
.. code-block:: bash
salt-cloud -a shutdown xenvm01
'''
if call == 'function':
raise SaltCloudException(
'The show_instnce function must be called with -a or --action.'
)
if session is None:
session = _get_session()
log.info('Starting VM %s', name)
vm = _get_vm(name, session)
task = session.xenapi.Async.VM.shutdown(vm)
_run_async_task(task, session)
return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm.

    Issues a clean reboot when the VM is running; otherwise returns a
    message string explaining that the VM cannot be rebooted.

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # message previously a copy-paste of show_instance's
        raise SaltCloudException(
            'The reboot function must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # log message previously said 'Starting VM'
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
'''
Get XEN vm instance object reference
'''
if session is None:
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
return vms[0]
return None
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None
def _get_pool(name=None, session=None):
'''
Get XEN resource pool object reference
'''
if session is None:
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
if name in pool_record.get('name_label'):
return pool
return None
def destroy(name=None, call=None):
'''
Destroy Xen VM or template instance
.. code-block:: bash
salt-cloud -d xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
ret = {}
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
session = _get_session()
vm = _get_vm(name)
if vm:
# get vm
record = session.xenapi.VM.get_record(vm)
log.debug('power_state: %s', record['power_state'])
# shut down
if record['power_state'] != 'Halted':
task = session.xenapi.Async.VM.hard_shutdown(vm)
_run_async_task(task, session)
# destroy disk (vdi) by reading vdb on vm
ret['vbd'] = destroy_vm_vdis(name, session)
# destroy vm
task = session.xenapi.Async.VM.destroy(vm)
_run_async_task(task, session)
ret['destroyed'] = True
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
__utils__['cloud.cachedir_index_del'](name)
return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories.

    .. code-block:: bash

        salt-cloud -f sr_list myxen

    :returns: dict mapping each SR's name label to its full record
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    ret = {}
    session = _get_session()
    srs = session.xenapi.SR.get_all()
    for sr in srs:
        sr_record = session.xenapi.SR.get_record(sr)
        ret[sr_record['name_label']] = sr_record
    return ret
def host_list(call=None):
'''
Get a list of Xen Servers
.. code-block:: bash
salt-cloud -f host_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
hosts = session.xenapi.host.get_all()
for host in hosts:
host_record = session.xenapi.host.get_record(host)
ret[host_record['name_label']] = host_record
return ret
def pool_list(call=None):
'''
Get a list of Resource Pools
.. code-block:: bash
salt-cloud -f pool_list myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pools = session.xenapi.pool.get_all()
for pool in pools:
pool_record = session.xenapi.pool.get_record(pool)
ret[pool_record['name_label']] = pool_record
return ret
def pif_list(call=None):
    '''
    Get a list of the physical network interfaces (PIFs) known to Xen,
    keyed by PIF uuid.

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.PIF.get_record(pif)
               for pif in session.xenapi.PIF.get_all())
    return {rec['uuid']: rec for rec in records}
def vif_list(name, call=None, kwargs=None):
'''
Get a list of virtual network interfaces on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vif_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vm = _get_vm(name)
vifs = session.xenapi.VM.get_VIFs(vm)
if vifs is not None:
x = 0
for vif in vifs:
vif_record = session.xenapi.VIF.get_record(vif)
data['vif-{}'.format(x)] = vif_record
x += 1
ret[name] = data
return ret
def vbd_list(name=None, call=None):
'''
Get a list of VBDs on a VM
**requires**: the name of the vm with the vbd definition
.. code-block:: bash
salt-cloud -a vbd_list xenvm01
'''
if call == 'function':
raise SaltCloudSystemExit(
'This function must be called with -a, --action argument.'
)
if name is None:
return 'A name kwarg is rquired'
ret = {}
data = {}
session = _get_session()
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
vm = vms[0]
vbds = session.xenapi.VM.get_VBDs(vm)
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
data['vbd-{}'.format(x)] = vbd_record
x += 1
ret = data
return ret
def avail_images(call=None):
'''
Get a list of images from Xen
If called with the `--list-images` then it returns
images with all details.
.. code-block:: bash
salt-cloud --list-images myxen
'''
if call == 'action':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
'''
Get virtual block devices on VM
.. code-block:: bash
salt-cloud -a destroy_vm_vdis xenvm01
'''
if session is None:
session = _get_session()
ret = {}
# get vm object
vms = session.xenapi.VM.get_by_name_label(name)
if len(vms) == 1:
# read virtual block device (vdb)
vbds = session.xenapi.VM.get_VBDs(vms[0])
if vbds is not None:
x = 0
for vbd in vbds:
vbd_record = session.xenapi.VBD.get_record(vbd)
if vbd_record['VDI'] != 'OpaqueRef:NULL':
# read vdi on vdb
vdi_record = session.xenapi.VDI.get_record(
vbd_record['VDI'])
if 'iso' not in vdi_record['name_label']:
session.xenapi.VDI.destroy(vbd_record['VDI'])
ret['vdi-{}'.format(x)] = vdi_record['name_label']
x += 1
return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy a Xen template by name.

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2

    :param name: template name label (may also be passed as the
        ``name`` kwarg, which takes precedence)
    :returns: dict mapping the name to a status of ``destroyed`` or
        ``not found``
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    if kwargs is None:
        kwargs = {}
    # fall back to the positional argument when no name kwarg is given
    # (previously the positional name was always discarded)
    name = kwargs.get('name', name)
    session = _get_session()
    # get_all_records() already returns {ref: record}; no need for a
    # per-VM get_record round-trip
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    found = False
    for vm, record in vms.items():
        if record['is_a_template'] and record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
saltstack/salt
|
salt/cloud/clouds/xen.py
|
set_pv_args
|
python
|
def set_pv_args(name, kwargs=None, session=None, call=None):
'''
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
'''
if call == 'function':
raise SaltCloudException(
'This function must be called with -a or --action.'
)
if session is None:
log.debug('New session being created')
session = _get_session()
vm = _get_vm(name, session=session)
try:
log.debug('Setting PV Args: %s', kwargs['pv_args'])
session.xenapi.VM.set_PV_args(vm, str(kwargs['pv_args']))
except KeyError:
log.error('No pv_args parameter found.')
return False
except XenAPI.Failure:
log.info('Setting PV Args failed.')
return False
return True
|
Set PV arguments for a VM
.. code-block:: bash
salt-cloud -a set_pv_args xenvm01 pv_args="utf-8 graphical"
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/xen.py#L1315-L1341
|
[
"def _get_session():\n '''\n Get a connection to the XenServer host\n '''\n api_version = '1.0'\n originator = 'salt_cloud_{}_driver'.format(__virtualname__)\n url = config.get_cloud_config_value(\n 'url',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n user = config.get_cloud_config_value(\n 'user',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n password = config.get_cloud_config_value(\n 'password',\n get_configured_provider(),\n __opts__,\n search_global=False\n )\n ignore_ssl = config.get_cloud_config_value(\n 'ignore_ssl',\n get_configured_provider(),\n __opts__,\n default=False,\n search_global=False\n )\n try:\n session = XenAPI.Session(url, ignore_ssl=ignore_ssl)\n log.debug(\n 'url: %s user: %s password: %s, originator: %s',\n url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n except XenAPI.Failure as ex:\n pool_master_addr = six.text_type(ex.__dict__['details'][1])\n slash_parts = url.split('/')\n new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr\n session = XenAPI.Session(new_url)\n log.debug(\n 'session is -> url: %s user: %s password: %s, originator:%s',\n new_url, user, 'XXX-pw-redacted-XXX', originator\n )\n session.xenapi.login_with_password(\n user, password, api_version, originator)\n return session\n",
"def _get_vm(name=None, session=None):\n '''\n Get XEN vm instance object reference\n '''\n if session is None:\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n return vms[0]\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
XenServer Cloud Driver
======================
The XenServer driver is designed to work with a Citrix XenServer.
Requires XenServer SDK
(can be downloaded from https://www.citrix.com/downloads/xenserver/product-software/ )
Place a copy of the XenAPI.py in the Python site-packages folder.
:depends: XenAPI
Example provider configuration:
.. code-block:: yaml
# /etc/salt/cloud.providers.d/myxen.conf
myxen:
driver: xen
url: http://10.0.0.120
user: root
password: p@ssw0rd
Example profile configuration:
.. code-block:: yaml
# /etc/salt/cloud.profiles.d/myxen.conf
suse:
provider: myxen
user: root
password: p@ssw0rd
image: opensuseleap42_2-template
storage_repo: 'Local storage'
resource_pool: default_pool
clone: True
minion:
master: 10.0.0.18
sles:
provider: myxen
user: root
clone: False
image: sles12sp2-template
deploy: False
w2k12:
provider: myxen
image: w2k12svr-template
clone: True
userdata_file: /srv/salt/win/files/windows-firewall.ps1
win_installer: /srv/salt/win/files/Salt-Minion-2016.11.3-AMD64-Setup.exe
win_username: Administrator
win_password: p@ssw0rd
use_winrm: False
ipv4_cidr: 10.0.0.215/24
ipv4_gw: 10.0.0.1
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
from datetime import datetime
import logging
import time
# Import salt libs
import salt.config as config
from salt.ext import six
# Import Salt-Cloud Libs
import salt.utils.cloud
from salt.exceptions import (
SaltCloudSystemExit,
SaltCloudException
)
# Get logging started
log = logging.getLogger(__name__)
try:
import XenAPI
HAS_XEN_API = True
except ImportError:
HAS_XEN_API = False
__virtualname__ = 'xen'
cache = None
def __virtual__():
    '''
    Only load if Xen configuration and XEN SDK is found.
    '''
    # Provider must be configured (needs at least a 'url') ...
    if get_configured_provider() is False:
        return False
    # ... and the XenAPI SDK must be importable.
    if _get_dependencies() is False:
        return False
    global cache  # pylint: disable=global-statement,invalid-name
    # NOTE(review): salt.cache is not imported at the top of this file;
    # presumably reachable via another salt.* import -- verify, otherwise
    # this raises AttributeError at load time.
    cache = salt.cache.Cache(__opts__)
    return __virtualname__
def _get_dependencies():
    '''
    Warn if dependencies aren't met.

    Checks for the XenAPI.py module
    '''
    deps = {'XenAPI': HAS_XEN_API}
    return config.check_driver_dependencies(__virtualname__, deps)
def get_configured_provider():
    '''
    Return the first configured instance.
    '''
    provider_name = __active_provider_name__ or __virtualname__
    # 'url' is the only mandatory provider setting
    return config.is_provider_configured(__opts__, provider_name, ('url',))
def _get_session():
    '''
    Get a connection to the XenServer host

    Reads ``url``, ``user``, ``password`` and ``ignore_ssl`` from the
    provider configuration.  If the configured host turns out to be a pool
    slave, XenAPI reports the pool master's address in the failure details
    and the connection is retried against the master.

    :return: an authenticated ``XenAPI.Session``
    '''
    api_version = '1.0'
    originator = 'salt_cloud_{}_driver'.format(__virtualname__)
    url = config.get_cloud_config_value(
        'url',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    user = config.get_cloud_config_value(
        'user',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    password = config.get_cloud_config_value(
        'password',
        get_configured_provider(),
        __opts__,
        search_global=False
    )
    ignore_ssl = config.get_cloud_config_value(
        'ignore_ssl',
        get_configured_provider(),
        __opts__,
        default=False,
        search_global=False
    )
    try:
        session = XenAPI.Session(url, ignore_ssl=ignore_ssl)
        log.debug(
            'url: %s user: %s password: %s, originator: %s',
            url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    except XenAPI.Failure as ex:
        # A HOST_IS_SLAVE failure carries the pool master address in the
        # failure details; rebuild the URL against the master and retry.
        pool_master_addr = six.text_type(ex.__dict__['details'][1])
        slash_parts = url.split('/')
        new_url = '/'.join(slash_parts[:2]) + '/' + pool_master_addr
        # Fix: honor the configured ignore_ssl on the retry as well; it was
        # previously dropped, so self-signed pools failed over incorrectly.
        session = XenAPI.Session(new_url, ignore_ssl=ignore_ssl)
        log.debug(
            'session is -> url: %s user: %s password: %s, originator:%s',
            new_url, user, 'XXX-pw-redacted-XXX', originator
        )
        session.xenapi.login_with_password(
            user, password, api_version, originator)
    return session
def list_nodes():
    '''
    List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and dom0 (the control domain).
        if not record['is_a_template'] and not record['is_control_domain']:
            # base_template_name is only set on VMs created from a template
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            # 'size' is reported as memory_dynamic_max; Xen has no flavor concept
            ret[record['name_label']] = {'id': record['uuid'],
                                         'image': base_template_name,
                                         'name': record['name_label'],
                                         'size': record['memory_dynamic_max'],
                                         'state': record['power_state'],
                                         'private_ips': get_vm_ip(record['name_label'], session),
                                         'public_ips': None}
    return ret
def get_vm_ip(name=None, session=None, call=None):
    '''
    Get the IP address of the VM

    .. code-block:: bash

        salt-cloud -a get_vm_ip xenvm01

    .. note:: Requires xen guest tools to be installed in VM
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session=session)
    ret = None
    # -- try to get ip from vif
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        for vif in vifs:
            if session.xenapi.VIF.get_ipv4_addresses(vif):
                # addresses are in CIDR form; strip the prefix length
                cidr = session.xenapi.VIF.get_ipv4_addresses(vif).pop()
                ret, subnet = cidr.split('/')
                log.debug(
                    'VM vif returned for instance: %s ip: %s', name, ret)
                return ret
    # -- try to get ip from get tools metrics
    # (guest metrics only exist when the tools are running in the guest)
    vgm = session.xenapi.VM.get_guest_metrics(vm)
    try:
        net = session.xenapi.VM_guest_metrics.get_networks(vgm)
        if "0/ip" in net.keys():
            log.debug(
                'VM guest metrics returned for instance: %s 0/ip: %s',
                name, net["0/ip"]
            )
            ret = net["0/ip"]
    # except Exception as ex:
    except XenAPI.Failure:
        log.info('Could not get vm metrics at this time')
    return ret
def set_vm_ip(name=None,
              ipv4_cidr=None,
              ipv4_gw=None,
              session=None,
              call=None):
    '''
    Set the IP address on a virtual interface (vif)

    :param str name: name label of the VM
    :param str ipv4_cidr: address in CIDR form, e.g. ``10.0.0.215/24``
    :param str ipv4_gw: default gateway address
    '''
    mode = 'static'
    # TODO: Need to add support for IPv6
    if call == 'function':
        raise SaltCloudException(
            'The function must be called with -a or --action.')
    log.debug(
        'Setting name: %s ipv4_cidr: %s ipv4_gw: %s mode: %s',
        name, ipv4_cidr, ipv4_gw, mode
    )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm = _get_vm(name, session)
    # -- try to get ip from vif
    # TODO: for now will take first interface
    # addition consideration needed for
    # multiple interface(vif) VMs
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        log.debug('There are %s vifs.', len(vifs))
        for vif in vifs:
            record = session.xenapi.VIF.get_record(vif)
            log.debug(record)
            try:
                # NOTE(review): presumably requires guest tools with
                # static-IP support in the guest -- confirm on older tools
                session.xenapi.VIF.configure_ipv4(
                    vif, mode, ipv4_cidr, ipv4_gw)
            except XenAPI.Failure:
                log.info('Static IP assignment could not be performed.')
    return True
def list_nodes_full(session=None):
    '''
    List full virtual machines

    .. code-block:: bash

        salt-cloud -F
    '''
    if session is None:
        session = _get_session()
    ret = {}
    vms = session.xenapi.VM.get_all()
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # skip templates and dom0
        if not record['is_a_template'] and not record['is_control_domain']:
            # deal with cases where the VM doesn't have 'base_template_name' attribute
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                base_template_name = None
                log.debug(
                    'VM %s, doesnt have base_template_name attribute',
                    record['name_label']
                )
            vm_cfg = session.xenapi.VM.get_record(vm)
            vm_cfg['id'] = record['uuid']
            vm_cfg['name'] = record['name_label']
            vm_cfg['image'] = base_template_name
            vm_cfg['size'] = None
            vm_cfg['state'] = record['power_state']
            vm_cfg['private_ips'] = get_vm_ip(record['name_label'], session)
            vm_cfg['public_ips'] = None
            # snapshot_time is dropped -- presumably because the XenAPI
            # datetime value is not serializable; verify before relying on it
            if 'snapshot_time' in vm_cfg.keys():
                del vm_cfg['snapshot_time']
            ret[record['name_label']] = vm_cfg
    # normalize 'provider:driver' notation down to the provider name
    provider = __active_provider_name__ or 'xen'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]
    log.debug('ret: %s', ret)
    log.debug('provider: %s', provider)
    log.debug('__opts__: %s', __opts__)
    __utils__['cloud.cache_node_list'](ret, provider, __opts__)
    return ret
def list_nodes_select(call=None):
    '''
    Perform a select query on Xen VM instances

    .. code-block:: bash

        salt-cloud -S
    '''
    selection = __opts__['query.selection']
    nodes = list_nodes_full()
    return salt.utils.cloud.list_nodes_select(nodes, selection, call)
def vdi_list(call=None, kwargs=None):
    '''
    Return available Xen VDI images

    If this function is called with the ``-f`` or ``--function`` then
    it can return a list with minimal detail using the ``terse=True`` keyword
    argument.

    .. code-block:: bash

        salt-cloud -f vdi_list myxen terse=True
    '''
    if call == 'action':
        raise SaltCloudException(
            'This function must be called with -f or --function.')
    log.debug('kwargs is %s', kwargs)
    if kwargs is None:
        kwargs = {}
    # Accept both a real boolean and the string 'True' (the form the
    # salt-cloud command line delivers); previously only the string worked.
    terse = kwargs.get('terse') in (True, 'True')
    session = _get_session()
    ret = {}
    for vdi in session.xenapi.VDI.get_all():
        data = session.xenapi.VDI.get_record(vdi)
        if terse:
            ret[data.get('name_label')] = {
                'uuid': data.get('uuid'),
                # 'OpqueRef' was a typo; keep it alongside the corrected
                # 'OpaqueRef' key for backward compatibility.
                'OpaqueRef': vdi,
                'OpqueRef': vdi}
        else:
            data.update({'OpaqueRef': vdi})
            ret[data.get('name_label')] = data
    return ret
def avail_locations(session=None, call=None):
    '''
    Return available Xen locations (not implemented)

    .. code-block:: bash

        salt-cloud --list-locations myxen
    '''
    # TODO: need to figure out a good meaning of locations in Xen
    if call == 'action':
        raise SaltCloudException(
            'The avail_locations function must be called with -f or --function.'
        )
    # Resource pools are the closest XenServer analogue to a location.
    return pool_list()
def avail_sizes(session=None, call=None):
    '''
    Return a list of Xen template definitions

    .. code-block:: bash

        salt-cloud --list-sizes myxen
    '''
    if call == 'action':
        raise SaltCloudException(
            'The avail_sizes function must be called with -f or --function.')
    # Sizing in Xen is baked into the template, so there is nothing to list.
    return {
        'STATUS':
        'Sizes are build into templates. Consider running --list-images to see sizes'}
def template_list(call=None):
    '''
    Return available Xen template information.

    This returns the details of
    each template to show number cores, memory sizes, etc..

    .. code-block:: bash

        salt-cloud -f template_list myxen
    '''
    session = _get_session()
    records = (session.xenapi.VM.get_record(ref)
               for ref in session.xenapi.VM.get_all())
    return {rec['name_label']: rec for rec in records if rec['is_a_template']}
def show_instance(name, session=None, call=None):
    '''
    Show information about a specific VM or template

    .. code-block:: bash

        salt-cloud -a show_instance xenvm01

    .. note:: memory is memory_dynamic_max
    '''
    if call == 'function':
        # fixed misspelled 'show_instnce' in the error message
        raise SaltCloudException(
            'The show_instance function must be called with -a or --action.'
        )
    log.debug('show_instance-> name: %s session: %s', name, session)
    if session is None:
        session = _get_session()
    vm = _get_vm(name, session=session)
    record = session.xenapi.VM.get_record(vm)
    # Initialize so a template/control-domain name returns an empty dict
    # instead of raising NameError on the final return.
    ret = {}
    if not record['is_a_template'] and not record['is_control_domain']:
        try:
            base_template_name = record['other_config']['base_template_name']
        except Exception:
            base_template_name = None
            log.debug(
                'VM %s, doesnt have base_template_name attribute',
                record['name_label']
            )
        ret = {'id': record['uuid'],
               'image': base_template_name,
               'name': record['name_label'],
               'size': record['memory_dynamic_max'],
               'state': record['power_state'],
               'private_ips': get_vm_ip(name, session),
               'public_ips': None}
        __utils__['cloud.cache_node'](
            ret,
            __active_provider_name__,
            __opts__
        )
    return ret
def _determine_resource_pool(session, vm_):
    '''
    Called by create() used to determine resource pool
    '''
    if 'resource_pool' in vm_.keys():
        resource_pool = _get_pool(vm_['resource_pool'], session)
    else:
        # no pool requested: fall back to the first pool, if any exists
        all_pools = session.xenapi.pool.get_all()
        resource_pool = all_pools[0] if all_pools else None
    pool_record = session.xenapi.pool.get_record(resource_pool)
    log.debug('resource pool: %s', pool_record['name_label'])
    return resource_pool
def _determine_storage_repo(session, resource_pool, vm_):
    '''
    Called by create() used to determine storage repo for create
    '''
    if 'storage_repo' in vm_.keys():
        storage_repo = _get_sr(vm_['storage_repo'], session)
    elif resource_pool:
        # no explicit SR: use the pool's default SR
        storage_repo = session.xenapi.pool.get_default_SR(resource_pool)
        sr_record = session.xenapi.SR.get_record(storage_repo)
        log.debug('storage repository: %s', sr_record['name_label'])
    else:
        storage_repo = None
    log.debug('storage repository: %s', storage_repo)
    return storage_repo
def create(vm_):
    '''
    Create a VM in Xen

    The configuration for this function is read from the profile settings.

    .. code-block:: bash

        salt-cloud -p some_profile xenvm01
    '''
    name = vm_['name']
    record = {}
    ret = {}
    # fire creating event
    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    log.debug('Adding %s to cloud cache.', name)
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'xen', vm_['driver']
    )
    # connect to xen
    session = _get_session()
    # determine resource pool
    resource_pool = _determine_resource_pool(session, vm_)
    # determine storage repo
    storage_repo = _determine_storage_repo(session, resource_pool, vm_)
    # build VM
    image = vm_.get('image')
    clone = vm_.get('clone')
    if clone is None:
        clone = True  # default to the faster clone path
    log.debug('Clone: %s ', clone)
    # fire event to read new vm properties (requesting)
    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(name),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    # create by cloning template (copy is cross-SR capable but slower)
    if clone:
        _clone_vm(image, name, session)
    else:
        _copy_vm(image, name, session, storage_repo)
    # provision template to vm
    _provision_vm(name, session)
    vm = _get_vm(name, session)
    # start vm
    start(name, None, session)
    # get new VM
    vm = _get_vm(name, session)
    # wait for vm to report IP via guest tools
    _wait_for_ip(name, session)
    # set static IP if configured
    _set_static_ip(name, session, vm_)
    # if not deploying salt then exit
    deploy = vm_.get('deploy', True)
    # NOTE(review): 'delopy' is a typo for 'deploy' in this log message
    log.debug('delopy is set to %s', deploy)
    if deploy:
        record = session.xenapi.VM.get_record(vm)
        if record is not None:
            _deploy_salt_minion(name, session, vm_)
    else:
        log.debug(
            'The Salt minion will not be installed, deploy: %s',
            vm_['deploy']
        )
    record = session.xenapi.VM.get_record(vm)
    ret = show_instance(name)
    ret.update({'extra': record})
    # fire created event so reactors/orchestration can respond
    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(name),
        args={
            'name': name,
            'profile': vm_['profile'],
            'provider': vm_['driver'],
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return ret
def _deploy_salt_minion(name, session, vm_):
    '''
    Deploy salt minion during create()
    '''
    # Get bootstrap values
    vm_['ssh_host'] = get_vm_ip(name, session)
    vm_['user'] = vm_.get('user', 'root')
    # NOTE(review): hard-coded fallback password -- profiles should always
    # set 'password' explicitly; relying on this default is insecure.
    vm_['password'] = vm_.get('password', 'p@ssw0rd!')
    vm_['provider'] = vm_.get('provider', 'xen')
    log.debug('%s has IP of %s', name, vm_['ssh_host'])
    # Bootstrap Salt minion!
    if vm_['ssh_host'] is not None:
        log.info('Installing Salt minion on %s', name)
        boot_ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        log.debug('boot return: %s', boot_ret)
def _set_static_ip(name, session, vm_):
    '''
    Set static IP during create() if defined
    '''
    ipv4_gw = ''
    ipv4_cidr = ''
    if 'ipv4_gw' in vm_:
        log.debug('ipv4_gw is found in keys')
        ipv4_gw = vm_['ipv4_gw']
    if 'ipv4_cidr' in vm_:
        log.debug('ipv4_cidr is found in keys')
        ipv4_cidr = vm_['ipv4_cidr']
    log.debug('attempting to set IP in instance')
    set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)
def _wait_for_ip(name, session):
    '''
    Wait for IP to be available during create()

    Polls ``get_vm_ip`` every 5 seconds until the guest tools report a
    usable address, giving up after 180 seconds.

    :param str name: name label of the VM
    :param session: XenAPI session object
    '''
    start_time = datetime.now()
    status = None
    while status is None:
        status = get_vm_ip(name, session)
        if status is not None:
            # Only 169.254.0.0/16 is link-local (APIPA) space; the previous
            # check startswith('169') wrongly rejected any 169.x.x.x address.
            if status.startswith('169.254.'):
                status = None
        check_time = datetime.now()
        delta = check_time - start_time
        log.debug(
            'Waited %s seconds for %s to report ip address...',
            delta.seconds, name
        )
        if delta.seconds > 180:
            log.warning('Timeout getting IP address')
            break
        time.sleep(5)
def _run_async_task(task=None, session=None):
    '''
    Run XenAPI task in asynchronous mode to prevent timeouts

    Blocks until the task leaves the 'pending' state, then destroys it.
    '''
    if task is None or session is None:
        return None
    task_api = session.xenapi.task
    label = task_api.get_name_label(task)
    log.debug('Running %s', label)
    while task_api.get_status(task) == 'pending':
        pct = round(task_api.get_progress(task), 2) * 100
        log.debug('Task progress %.2f%%', pct)
        time.sleep(1)
    log.debug('Cleaning up task %s', label)
    task_api.destroy(task)
def _clone_vm(image=None, name=None, session=None):
    '''
    Create VM by cloning

    This is faster and should be used if source and target are
    in the same storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by cloning %s', name, image)
    template_ref = _get_vm(image, session)
    clone_task = session.xenapi.Async.VM.clone(template_ref, name)
    _run_async_task(clone_task, session)
def _copy_vm(template=None, name=None, session=None, sr=None):
    '''
    Create VM by copy

    This is slower and should be used if source and target are
    NOT in the same storage repository

    :param template: object reference of the source template
    :param name: string name of new VM
    :param session: XenAPI session object reference
    :param sr: object reference of the target storage repository
    '''
    if session is None:
        session = _get_session()
    log.debug('Creating VM %s by copying %s', name, template)
    source_ref = _get_vm(template, session)
    copy_task = session.xenapi.Async.VM.copy(source_ref, name, sr)
    _run_async_task(copy_task, session)
def _provision_vm(name=None, session=None):
    '''
    Provision vm right after clone/copy
    '''
    if session is None:
        session = _get_session()
    log.info('Provisioning VM %s', name)
    vm_ref = _get_vm(name, session)
    _run_async_task(session.xenapi.Async.VM.provision(vm_ref), session)
def start(name, call=None, session=None):
    '''
    Start a vm

    .. code-block:: bash

        salt-cloud -a start xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The start action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Starting VM %s', name)
    vm = _get_vm(name, session)
    # start(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.start(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def pause(name, call=None, session=None):
    '''
    Pause a vm

    .. code-block:: bash

        salt-cloud -a pause xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The pause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Pausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.pause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def unpause(name, call=None, session=None):
    '''
    UnPause a vm

    .. code-block:: bash

        salt-cloud -a unpause xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The unpause action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Unpausing VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.unpause(vm)
    _run_async_task(task, session)
    return show_instance(name)
def suspend(name, call=None, session=None):
    '''
    Suspend a vm to disk

    .. code-block:: bash

        salt-cloud -a suspend xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The suspend action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Suspending VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.suspend(vm)
    _run_async_task(task, session)
    return show_instance(name)
def resume(name, call=None, session=None):
    '''
    Resume a vm from disk

    .. code-block:: bash

        salt-cloud -a resume xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The resume action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    log.info('Resuming VM %s', name)
    vm = _get_vm(name, session)
    # resume(vm, start_paused=False, force=True)
    task = session.xenapi.Async.VM.resume(vm, False, True)
    _run_async_task(task, session)
    return show_instance(name)
def stop(name, call=None, session=None):
    '''
    Stop a vm

    .. code-block:: bash

        salt-cloud -a stop xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The stop action must be called with -a or --action.'
        )
    # stop is an alias for a clean shutdown
    return shutdown(name, call, session)
def shutdown(name, call=None, session=None):
    '''
    Shutdown a vm

    .. code-block:: bash

        salt-cloud -a shutdown xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The shutdown action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed misleading 'Starting VM' log message
    log.info('Shutting down VM %s', name)
    vm = _get_vm(name, session)
    task = session.xenapi.Async.VM.shutdown(vm)
    _run_async_task(task, session)
    return show_instance(name)
def reboot(name, call=None, session=None):
    '''
    Reboot a vm

    .. code-block:: bash

        salt-cloud -a reboot xenvm01
    '''
    if call == 'function':
        # fixed copy-pasted 'show_instnce' in the error message
        raise SaltCloudException(
            'The reboot action must be called with -a or --action.'
        )
    if session is None:
        session = _get_session()
    # fixed misleading 'Starting VM' log message
    log.info('Rebooting VM %s', name)
    vm = _get_vm(name, session)
    power_state = session.xenapi.VM.get_power_state(vm)
    if power_state == 'Running':
        task = session.xenapi.Async.VM.clean_reboot(vm)
        _run_async_task(task, session)
        return show_instance(name)
    else:
        return '{} is not running to be rebooted'.format(name)
def _get_vm(name=None, session=None):
    '''
    Get XEN vm instance object reference

    Returns None unless exactly one VM matches the name label.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.VM.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_sr(name=None, session=None):
    '''
    Get XEN sr (storage repo) object reference

    Returns None unless exactly one SR matches the name label.
    '''
    if session is None:
        session = _get_session()
    matches = session.xenapi.SR.get_by_name_label(name)
    return matches[0] if len(matches) == 1 else None
def _get_pool(name=None, session=None):
    '''
    Get XEN resource pool object reference

    Matches by substring of the pool's name label.
    '''
    if session is None:
        session = _get_session()
    for pool_ref in session.xenapi.pool.get_all():
        record = session.xenapi.pool.get_record(pool_ref)
        if name in record.get('name_label'):
            return pool_ref
    return None
def destroy(name=None, call=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -d xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    ret = {}
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    session = _get_session()
    vm = _get_vm(name)
    if vm:
        # get vm
        record = session.xenapi.VM.get_record(vm)
        log.debug('power_state: %s', record['power_state'])
        # shut down (hard -- the VM is going away anyway)
        if record['power_state'] != 'Halted':
            task = session.xenapi.Async.VM.hard_shutdown(vm)
            _run_async_task(task, session)
        # destroy disk (vdi) by reading vdb on vm
        ret['vbd'] = destroy_vm_vdis(name, session)
        # destroy vm
        task = session.xenapi.Async.VM.destroy(vm)
        _run_async_task(task, session)
        ret['destroyed'] = True
        __utils__['cloud.fire_event'](
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            args={'name': name},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )
        # drop the minion from salt-cloud's cache bookkeeping
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](
                name,
                __active_provider_name__.split(':')[0],
                __opts__
            )
        __utils__['cloud.cachedir_index_del'](name)
        return ret
def sr_list(call=None):
    '''
    Get a list of storage repositories

    .. code-block:: bash

        salt-cloud -f sr_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.SR.get_record(ref)
               for ref in session.xenapi.SR.get_all())
    return {rec['name_label']: rec for rec in records}
def host_list(call=None):
    '''
    Get a list of Xen Servers

    .. code-block:: bash

        salt-cloud -f host_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.host.get_record(ref)
               for ref in session.xenapi.host.get_all())
    return {rec['name_label']: rec for rec in records}
def pool_list(call=None):
    '''
    Get a list of Resource Pools

    .. code-block:: bash

        salt-cloud -f pool_list myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.pool.get_record(ref)
               for ref in session.xenapi.pool.get_all())
    return {rec['name_label']: rec for rec in records}
def pif_list(call=None):
    '''
    Get a list of physical network interfaces (PIFs), keyed by UUID

    .. code-block:: bash

        salt-cloud -f pif_list myxen
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    session = _get_session()
    records = (session.xenapi.PIF.get_record(ref)
               for ref in session.xenapi.PIF.get_all())
    return {rec['uuid']: rec for rec in records}
def vif_list(name, call=None, kwargs=None):
    '''
    Get a list of virtual network interfaces on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vif_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed user-facing typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vm = _get_vm(name)
    vifs = session.xenapi.VM.get_VIFs(vm)
    if vifs is not None:
        x = 0
        for vif in vifs:
            vif_record = session.xenapi.VIF.get_record(vif)
            data['vif-{}'.format(x)] = vif_record
            x += 1
    ret[name] = data
    return ret
def vbd_list(name=None, call=None):
    '''
    Get a list of VBDs on a VM

    **requires**: the name of the vm with the vbd definition

    .. code-block:: bash

        salt-cloud -a vbd_list xenvm01
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'This function must be called with -a, --action argument.'
        )
    if name is None:
        # fixed user-facing typo: 'rquired' -> 'required'
        return 'A name kwarg is required'
    ret = {}
    data = {}
    session = _get_session()
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        vm = vms[0]
        vbds = session.xenapi.VM.get_VBDs(vm)
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                data['vbd-{}'.format(x)] = vbd_record
                x += 1
    ret = data
    return ret
def avail_images(call=None):
    '''
    Get a list of images from Xen

    If called with the `--list-images` then it returns
    images with all details.

    .. code-block:: bash

        salt-cloud --list-images myxen
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'This function must be called with -f, --function argument.'
        )
    # Templates are what Xen calls images.
    return template_list()
def destroy_vm_vdis(name=None, session=None, call=None):
    '''
    Get virtual block devices on VM

    Destroys each non-ISO VDI attached to the VM and returns the names of
    the destroyed disks keyed by ``vdi-N``.

    .. code-block:: bash

        salt-cloud -a destroy_vm_vdis xenvm01
    '''
    if session is None:
        session = _get_session()
    ret = {}
    # get vm object
    vms = session.xenapi.VM.get_by_name_label(name)
    if len(vms) == 1:
        # read virtual block device (vdb)
        vbds = session.xenapi.VM.get_VBDs(vms[0])
        if vbds is not None:
            x = 0
            for vbd in vbds:
                vbd_record = session.xenapi.VBD.get_record(vbd)
                # empty drives point at the NULL opaque reference
                if vbd_record['VDI'] != 'OpaqueRef:NULL':
                    # read vdi on vdb
                    vdi_record = session.xenapi.VDI.get_record(
                        vbd_record['VDI'])
                    # skip ISO media; only destroy real disk VDIs
                    if 'iso' not in vdi_record['name_label']:
                        session.xenapi.VDI.destroy(vbd_record['VDI'])
                        ret['vdi-{}'.format(x)] = vdi_record['name_label']
                    x += 1
    return ret
def destroy_template(name=None, call=None, kwargs=None):
    '''
    Destroy Xen VM or template instance

    .. code-block:: bash

        salt-cloud -f destroy_template myxen name=testvm2
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The destroy_template function must be called with -f.'
        )
    kwargs = kwargs if kwargs is not None else {}
    name = kwargs.get('name', None)
    session = _get_session()
    ret = {}
    found = False
    for vm_ref in session.xenapi.VM.get_all_records():
        record = session.xenapi.VM.get_record(vm_ref)
        if not record['is_a_template']:
            continue
        if record['name_label'] == name:
            found = True
            session.xenapi.VM.destroy(vm_ref)
            ret[name] = {'status': 'destroyed'}
    if not found:
        ret[name] = {'status': 'not found'}
    return ret
def get_pv_args(name, session=None, call=None):
    '''
    Get PV arguments for a VM

    .. code-block:: bash

        salt-cloud -a get_pv_args xenvm01
    '''
    if call == 'function':
        raise SaltCloudException(
            'This function must be called with -a or --action.'
        )
    if session is None:
        log.debug('New session being created')
        session = _get_session()
    vm_ref = _get_vm(name, session=session)
    pv_args = session.xenapi.VM.get_PV_args(vm_ref)
    # empty PV args are reported as None rather than ''
    return pv_args if pv_args else None
|
saltstack/salt
|
salt/modules/mac_service.py
|
_name_in_services
|
python
|
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
|
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L76-L102
| null |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _get_service(name):
'''
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
'''
services = __utils__['mac_utils.available_services']()
name = name.lower()
service = _name_in_services(name, services)
# if we would the service we can return it
if service:
return service
# if we got here our service is not available, now we can check to see if
# we received a cached batch of services, if not we did a fresh check
# so we need to raise that the service could not be found.
try:
if not __context__['using_cached_services']:
raise CommandExecutionError('Service not found: {0}'.format(name))
except KeyError:
pass
# we used a cached version to check, a service could have been made
# between now and then, we should refresh our available services.
services = __utils__['mac_utils.available_services'](refresh=True)
# check to see if we found the service we are looking for.
service = _name_in_services(name, services)
if not service:
# Could not find the service after refresh raise.
raise CommandExecutionError('Service not found: {0}'.format(name))
# found it :)
return service
def _always_running_service(name):
'''
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# get all the info from the launchctl service
service_info = show(name)
# get the value for the KeepAlive key in service plist
try:
keep_alive = service_info['plist']['KeepAlive']
except KeyError:
return False
# check if KeepAlive is True and not just set.
if isinstance(keep_alive, dict):
# check for pathstate
for _file, value in six.iteritems(keep_alive.get('PathState', {})):
if value is True and os.path.exists(_file):
return True
elif value is False and not os.path.exists(_file):
return True
if keep_alive is True:
return True
return False
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
def _launch_agent(name):
'''
Checks to see if the provided service is a LaunchAgent
:param str name: Service label, file name, or full path
:return: True if a LaunchAgent, False if not.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# Get the path to the service.
path = _get_service(name)['file_path']
if 'LaunchAgents' not in path:
return False
return True
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
'''
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
'''
if name:
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
runas=runas)
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
'''
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
'''
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
def status(name, sig=None, runas=None):
'''
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
'''
# Find service with ps
if sig:
return __salt__['status.pid'](sig)
try:
_get_service(name)
except CommandExecutionError as msg:
log.error(msg)
return ''
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
# of this module does
pids = ''
for line in output.splitlines():
if 'PID' in line:
continue
if re.search(name, line.split()[-1]):
if line.split()[0].isdigit():
if pids:
pids += '\n'
pids += line.split()[0]
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name) and not pids:
return 'loaded'
return pids
def available(name):
'''
Check that the given service is available.
:param str name: The name of the service
:return: True if the service is available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.available com.openssh.sshd
'''
try:
_get_service(name)
return True
except CommandExecutionError:
return False
def missing(name):
'''
The inverse of service.available
Check that the given service is not available.
:param str name: The name of the service
:return: True if the service is not available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.missing com.openssh.sshd
'''
return not available(name)
def enabled(name, runas=None):
'''
Check if the specified service is enabled
:param str name: The name of the service to look up
:param str runas: User to run launchctl commands
:return: True if the specified service enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enabled org.cups.cupsd
'''
# Try to list the service. If it can't be listed, it's not enabled
try:
list_(name=name, runas=runas)
return True
except CommandExecutionError:
return False
def disabled(name, runas=None, domain='system'):
'''
Check if the specified service is not enabled. This is the opposite of
``service.enabled``
:param str name: The name to look up
:param str runas: User to run launchctl commands
:param str domain: domain to check for disabled services. Default is system.
:return: True if the specified service is NOT enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disabled org.cups.cupsd
'''
disabled = launchctl('print-disabled',
domain,
return_stdout=True,
runas=runas)
for service in disabled.split("\n"):
if name in service:
srv_name = service.split("=>")[0].split("\"")[1]
status = service.split("=>")[1]
if name != srv_name:
pass
else:
return True if 'true' in status.lower() else False
return False
def get_all(runas=None):
'''
Return a list of services that are enabled or available. Can be used to
find the name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services available or enabled
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
# Get list of enabled services
enabled = get_enabled(runas=runas)
# Get list of all services
available = list(__utils__['mac_utils.available_services']().keys())
# Return composite list
return sorted(set(enabled + available))
def get_enabled(runas=None):
'''
Return a list of all services that are enabled. Can be used to find the
name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services enabled on the system
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
# Collect list of enabled services
stdout = list_(runas=runas)
service_lines = [line for line in stdout.splitlines()]
# Construct list of enabled services
enabled = []
for line in service_lines:
# Skip header line
if line.startswith('PID'):
continue
pid, status, label = line.split('\t')
enabled.append(label)
return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
_get_service
|
python
|
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error.

    The lookup is case-insensitive (``name`` is lower-cased before
    matching). If the first lookup misses against a cached service list,
    the list is refreshed once and the lookup retried before raising.

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error
    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found, even
        after refreshing the cached list of available services.
    '''
    services = __utils__['mac_utils.available_services']()
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # 'using_cached_services' not set yet -- treat as possibly stale
        # and fall through to the refresh below.
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service after refresh raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
|
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L105-L145
|
[
"def _name_in_services(name, services):\n '''\n Checks to see if the given service is in the given services.\n\n :param str name: Service label, file name, or full path\n\n :param dict services: The currently available services.\n\n :return: The service information for the service, otherwise\n an empty dictionary\n\n :rtype: dict\n '''\n if name in services:\n # Match on label\n return services[name]\n\n for service in six.itervalues(services):\n if service['file_path'].lower() == name:\n # Match on full path\n return service\n basename, ext = os.path.splitext(service['file_name'])\n if basename.lower() == name:\n # Match on basename\n return service\n\n return dict()\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _always_running_service(name):
'''
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# get all the info from the launchctl service
service_info = show(name)
# get the value for the KeepAlive key in service plist
try:
keep_alive = service_info['plist']['KeepAlive']
except KeyError:
return False
# check if KeepAlive is True and not just set.
if isinstance(keep_alive, dict):
# check for pathstate
for _file, value in six.iteritems(keep_alive.get('PathState', {})):
if value is True and os.path.exists(_file):
return True
elif value is False and not os.path.exists(_file):
return True
if keep_alive is True:
return True
return False
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
def _launch_agent(name):
'''
Checks to see if the provided service is a LaunchAgent
:param str name: Service label, file name, or full path
:return: True if a LaunchAgent, False if not.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# Get the path to the service.
path = _get_service(name)['file_path']
if 'LaunchAgents' not in path:
return False
return True
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
'''
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
'''
if name:
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
runas=runas)
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
'''
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
'''
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
def status(name, sig=None, runas=None):
'''
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
'''
# Find service with ps
if sig:
return __salt__['status.pid'](sig)
try:
_get_service(name)
except CommandExecutionError as msg:
log.error(msg)
return ''
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
# of this module does
pids = ''
for line in output.splitlines():
if 'PID' in line:
continue
if re.search(name, line.split()[-1]):
if line.split()[0].isdigit():
if pids:
pids += '\n'
pids += line.split()[0]
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name) and not pids:
return 'loaded'
return pids
def available(name):
'''
Check that the given service is available.
:param str name: The name of the service
:return: True if the service is available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.available com.openssh.sshd
'''
try:
_get_service(name)
return True
except CommandExecutionError:
return False
def missing(name):
'''
The inverse of service.available
Check that the given service is not available.
:param str name: The name of the service
:return: True if the service is not available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.missing com.openssh.sshd
'''
return not available(name)
def enabled(name, runas=None):
'''
Check if the specified service is enabled
:param str name: The name of the service to look up
:param str runas: User to run launchctl commands
:return: True if the specified service enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enabled org.cups.cupsd
'''
# Try to list the service. If it can't be listed, it's not enabled
try:
list_(name=name, runas=runas)
return True
except CommandExecutionError:
return False
def disabled(name, runas=None, domain='system'):
'''
Check if the specified service is not enabled. This is the opposite of
``service.enabled``
:param str name: The name to look up
:param str runas: User to run launchctl commands
:param str domain: domain to check for disabled services. Default is system.
:return: True if the specified service is NOT enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disabled org.cups.cupsd
'''
disabled = launchctl('print-disabled',
domain,
return_stdout=True,
runas=runas)
for service in disabled.split("\n"):
if name in service:
srv_name = service.split("=>")[0].split("\"")[1]
status = service.split("=>")[1]
if name != srv_name:
pass
else:
return True if 'true' in status.lower() else False
return False
def get_all(runas=None):
'''
Return a list of services that are enabled or available. Can be used to
find the name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services available or enabled
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
# Get list of enabled services
enabled = get_enabled(runas=runas)
# Get list of all services
available = list(__utils__['mac_utils.available_services']().keys())
# Return composite list
return sorted(set(enabled + available))
def get_enabled(runas=None):
'''
Return a list of all services that are enabled. Can be used to find the
name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services enabled on the system
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
# Collect list of enabled services
stdout = list_(runas=runas)
service_lines = [line for line in stdout.splitlines()]
# Construct list of enabled services
enabled = []
for line in service_lines:
# Skip header line
if line.startswith('PID'):
continue
pid, status, label = line.split('\t')
enabled.append(label)
return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
_always_running_service
|
python
|
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # Get all the info for the launchctl service (raises if not found).
    service_info = show(name)

    # Get the value for the KeepAlive key in the service plist.
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all: not an always-running service.
        return False

    # KeepAlive may be a dict of conditions rather than a plain boolean.
    if isinstance(keep_alive, dict):
        # PathState maps file paths to booleans: the job is kept alive
        # while a path exists (True) or while it is absent (False).
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True

    # Only an explicit boolean True counts; other truthy values (e.g. a
    # dict whose PathState conditions did not match) fall through to False.
    if keep_alive is True:
        return True
    return False
|
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L148-L185
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def show(name):\n '''\n Show properties of a launchctl service\n\n :param str name: Service label, file name, or full path\n\n :return: The service information if the service is found\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.show org.cups.cupsd # service label\n salt '*' service.show org.cups.cupsd.plist # file name\n salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path\n '''\n return _get_service(name)\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on macOS systems that ship the required binaries and are new
    enough to support the modern launchctl subcommands.
    '''
    prefix = 'Failed to load the mac_service module:\n'

    # This module is macOS-only.
    if not salt.utils.platform.is_darwin():
        return (False, prefix + 'Only available on macOS systems.')

    # Both binaries are required to query and manage launchd services.
    for binary in ('launchctl', 'plutil'):
        if not salt.utils.path.which(binary):
            return (False, prefix + 'Required binary not found: "{}"'.format(binary))

    # The launchctl subcommands used here need 10.11+.
    if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
        return (False, prefix + 'Requires macOS 10.11 or newer')

    return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
    '''
    Get information about a service.  If the service is not found, raise an
    error.

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error
    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached list of available services.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups are case-insensitive; _name_in_services expects a lower-cased name.
    name = name.lower()

    service = _name_in_services(name, services)

    # If we found the service we can return it immediately.
    if service:
        return service

    # The service is not in the list. If the lookup above did NOT use a
    # cached batch of services, the check was fresh, so the service
    # genuinely does not exist: raise.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # No cache-state flag recorded yet -- fall through and refresh.
        pass

    # We used a cached list to check; a service could have been created
    # between now and then, so refresh the available services.
    services = __utils__['mac_utils.available_services'](refresh=True)

    # Check to see if we found the service we are looking for.
    service = _name_in_services(name, services)

    if not service:
        # Could not find the service even after the refresh: raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))

    # Found it :)
    return service
def _get_domain_target(name, service_target=False):
    '''
    Returns the domain/service target and path for a service. This is used to
    determine whether or not a service should be loaded in a user space or
    system space.

    :param str name: Service label, file name, or full path

    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False

    :return: Tuple of the domain/service target and the path to the service.
    :rtype: tuple

    .. versionadded:: 2019.2.0
    '''
    # Get service information (raises if the service cannot be found).
    service = _get_service(name)

    # Get the path to the service plist.
    path = service['file_path']

    # Most of the time we'll be at the system level.
    domain_target = 'system'

    # LaunchAgents run per-user, so they must be targeted at the GUI
    # session of the console user rather than at the system domain.
    if 'LaunchAgents' in path:
        # Get the console user so we can target the correct session.
        uid = __utils__['mac_utils.console_user']()
        domain_target = 'gui/{}'.format(uid)

    # A full service target ("<domain>/<label>") is required by the
    # enable/disable subcommands of launchctl.
    if service_target is True:
        domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])

    return (domain_target, path)
def _launch_agent(name):
    '''
    Determine whether the provided service is a LaunchAgent.

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # LaunchAgents are identified purely by the location of their plist file.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
    '''
    Show properties of a launchctl service

    :param str name: Service label, file name, or full path

    :return: The service information if the service is found
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' service.show org.cups.cupsd  # service label
        salt '*' service.show org.cups.cupsd.plist  # file name
        salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist  # full path
    '''
    # Delegate the lookup (and the not-found error) to _get_service.
    service_info = _get_service(name)
    return service_info
def launchctl(sub_cmd, *args, **kwargs):
    '''
    Run a launchctl command and raise an error if it fails

    :param str sub_cmd: Sub command supplied to launchctl

    :param tuple args: Tuple containing additional arguments to pass to
        launchctl

    :param dict kwargs: Dictionary containing arguments to pass to
        ``cmd.run_all``

    :param bool return_stdout: A keyword argument. If true return the stdout
        of the launchctl command

    :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
        the stdout of the launchctl command if requested
    :rtype: bool, str

    CLI Example:

    .. code-block:: bash

        salt '*' service.launchctl debug org.cups.cupsd
    '''
    # All the heavy lifting (command assembly, runas handling, error
    # raising) lives in the shared mac_utils helper.
    run_launchctl = __utils__['mac_utils.launchctl']
    return run_launchctl(sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output

    :param str name: The name of the service to list

    :param str runas: User to run launchctl commands

    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if not name:
        # No service given: list every service (raises on failure).
        return launchctl('list', return_stdout=True, runas=runas)

    # Resolve the service so we can query launchctl by its label.
    label = _get_service(name)['plist']['Label']

    # Listing a LaunchAgent has to happen in a user context; default to
    # the console user when no runas user was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    # Collect information on the named service (raises on failure).
    return launchctl('list', label, return_stdout=True, runas=runas)
def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be enabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # launchctl enable needs a full <service-target>, not just a domain.
    service_target, _ = _get_domain_target(name, service_target=True)

    # Enable the service: will raise an error if it fails.
    return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # launchctl disable needs a full <service-target>, not just a domain.
    service_target, _ = _get_domain_target(name, service_target=True)

    # Disable the service: will raise an error if it fails.
    return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service.  Raises an error if the service fails to start

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Resolve the domain (system vs. per-user GUI session) and plist path.
    target, plist_path = _get_domain_target(name)

    # Load (bootstrap) the service: will raise an error if it fails.
    return launchctl('bootstrap', target, plist_path, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service.  Raises an error if the service fails to stop

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Resolve the domain (system vs. per-user GUI session) and plist path.
    target, plist_path = _get_domain_target(name)

    # Unload (bootout) the service: will raise an error if it fails.
    return launchctl('bootout', target, plist_path, runas=runas)
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service.  Raises an error if the service
    fails to reload

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Stop first only when the service is currently enabled, then start it
    # again. Either call will raise an error if it fails.
    if enabled(name):
        stop(name, runas=runas)
    start(name, runas=runas)

    return True
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead.  Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)
    # Bail out early (with an empty status) when the service is unknown.
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''

    # LaunchAgents must be queried in a user session; default to the
    # console user when no runas user was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    output = list_(runas=runas)

    # Used a string here instead of a list because that's what the linux version
    # of this module does
    pids = ''
    for line in output.splitlines():
        # Skip the header line of ``launchctl list``.
        if 'PID' in line:
            continue
        # The last column is the label; ``name`` is treated as a regex here.
        if re.search(name, line.split()[-1]):
            # Only count lines whose first column is a numeric PID.
            if line.split()[0].isdigit():
                if pids:
                    pids += '\n'
                pids += line.split()[0]

    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always be active with a PID. This
    # returns the string 'loaded' if the service shouldn't always be running
    # and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'

    return pids
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _get_service raises CommandExecutionError for unknown services;
    # translate that into a boolean.
    try:
        _get_service(name)
    except CommandExecutionError:
        return False
    return True
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check.
    if available(name):
        return False
    return True
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that cannot be listed by launchctl is not enabled.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # Ask launchctl for the domain's disabled-services table. Relevant
    # lines look like: "com.example.service" => true
    disabled_output = launchctl('print-disabled',
                                domain,
                                return_stdout=True,
                                runas=runas)
    for line in disabled_output.split("\n"):
        if name not in line:
            continue
        # Extract the quoted service label and its true/false state.
        srv_name = line.split("=>")[0].split("\"")[1]
        status = line.split("=>")[1]
        if srv_name == name:
            # The service is disabled when the state reads "true".
            return 'true' in status.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Union of every enabled service and every service known on disk,
    # de-duplicated and returned in sorted order.
    enabled_services = get_enabled(runas=runas)
    available_services = __utils__['mac_utils.available_services']()
    return sorted(set(enabled_services) | set(available_services))
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # Output of ``launchctl list``: a header line, then one
    # "PID<TAB>Status<TAB>Label" line per loaded service.
    stdout = list_(runas=runas)

    enabled = []
    for line in stdout.splitlines():
        # Skip the header line
        if line.startswith('PID'):
            continue
        # Only the label column is of interest here.
        _pid, _status, label = line.split('\t')
        enabled.append(label)

    return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
_get_domain_target
|
python
|
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
|
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L188-L226
|
[
"def _get_service(name):\n '''\n Get information about a service. If the service is not found, raise an\n error\n\n :param str name: Service label, file name, or full path\n\n :return: The service information for the service, otherwise an Error\n :rtype: dict\n '''\n services = __utils__['mac_utils.available_services']()\n name = name.lower()\n\n service = _name_in_services(name, services)\n\n # if we would the service we can return it\n if service:\n return service\n\n # if we got here our service is not available, now we can check to see if\n # we received a cached batch of services, if not we did a fresh check\n # so we need to raise that the service could not be found.\n try:\n if not __context__['using_cached_services']:\n raise CommandExecutionError('Service not found: {0}'.format(name))\n except KeyError:\n pass\n\n # we used a cached version to check, a service could have been made\n # between now and then, we should refresh our available services.\n services = __utils__['mac_utils.available_services'](refresh=True)\n\n # check to see if we found the service we are looking for.\n service = _name_in_services(name, services)\n\n if not service:\n # Could not find the service after refresh raise.\n raise CommandExecutionError('Service not found: {0}'.format(name))\n\n # found it :)\n return service\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
    '''
    Get information about a service.  If the service is not found, raise an
    error.

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error
    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached list of available services.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups are case-insensitive; _name_in_services expects a lower-cased name.
    name = name.lower()

    service = _name_in_services(name, services)

    # If we found the service we can return it immediately.
    if service:
        return service

    # The service is not in the list. If the lookup above did NOT use a
    # cached batch of services, the check was fresh, so the service
    # genuinely does not exist: raise.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # No cache-state flag recorded yet -- fall through and refresh.
        pass

    # We used a cached list to check; a service could have been created
    # between now and then, so refresh the available services.
    services = __utils__['mac_utils.available_services'](refresh=True)

    # Check to see if we found the service we are looking for.
    service = _name_in_services(name, services)

    if not service:
        # Could not find the service even after the refresh: raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))

    # Found it :)
    return service
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # Get all the info for the launchctl service (raises if not found).
    service_info = show(name)

    # Get the value for the KeepAlive key in the service plist.
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all: not an always-running service.
        return False

    # KeepAlive may be a dict of conditions rather than a plain boolean.
    if isinstance(keep_alive, dict):
        # PathState maps file paths to booleans: the job is kept alive
        # while a path exists (True) or while it is absent (False).
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True

    # Only an explicit boolean True counts; other truthy values (e.g. a
    # dict whose PathState conditions did not match) fall through to False.
    if keep_alive is True:
        return True
    return False
def _launch_agent(name):
'''
Checks to see if the provided service is a LaunchAgent
:param str name: Service label, file name, or full path
:return: True if a LaunchAgent, False if not.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# Get the path to the service.
path = _get_service(name)['file_path']
if 'LaunchAgents' not in path:
return False
return True
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
'''
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
'''
if name:
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
runas=runas)
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
'''
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
'''
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead.  Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)
    # Bail out early (with an empty status) when the service is unknown.
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''

    # LaunchAgents must be queried in a user session; default to the
    # console user when no runas user was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    output = list_(runas=runas)

    # Used a string here instead of a list because that's what the linux version
    # of this module does
    pids = ''
    for line in output.splitlines():
        # Skip the header line of ``launchctl list``.
        if 'PID' in line:
            continue
        # The last column is the label; ``name`` is treated as a regex here.
        if re.search(name, line.split()[-1]):
            # Only count lines whose first column is a numeric PID.
            if line.split()[0].isdigit():
                if pids:
                    pids += '\n'
                pids += line.split()[0]

    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always be active with a PID. This
    # returns the string 'loaded' if the service shouldn't always be running
    # and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'

    return pids
def available(name):
'''
Check that the given service is available.
:param str name: The name of the service
:return: True if the service is available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.available com.openssh.sshd
'''
try:
_get_service(name)
return True
except CommandExecutionError:
return False
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False

    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check
    if available(name):
        return False
    return True
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False

    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that cannot be listed is not enabled
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False

    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # Ask launchctl for the domain's disabled overrides; each entry looks
    # like: "com.openssh.sshd" => true
    # (avoid naming the local `disabled` -- it would shadow this function)
    ret = launchctl('print-disabled',
                    domain,
                    return_stdout=True,
                    runas=runas)
    for line in ret.split("\n"):
        if name not in line:
            continue
        try:
            srv_name = line.split("=>")[0].split("\"")[1]
            status = line.split("=>")[1]
        except IndexError:
            # Structural lines (braces, headers) don't follow the
            # '"label" => value' shape; skip them instead of crashing
            continue
        if srv_name == name:
            # The override value is 'true' when the service is disabled
            return 'true' in status.lower()
    # Not present in the disabled list => not disabled
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled

    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Union of the enabled labels and everything discovered on disk
    services = set(get_enabled(runas=runas))
    services.update(__utils__['mac_utils.available_services']().keys())
    return sorted(services)
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system

    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # launchctl list output: one "PID\tStatus\tLabel" line per service
    stdout = list_(runas=runas)

    # Collect the label (third column) of every listed service; a set
    # de-duplicates directly instead of sorting a list into a set later
    enabled = set()
    for line in stdout.splitlines():
        # Skip the header line
        if line.startswith('PID'):
            continue
        enabled.add(line.split('\t')[2])
    return sorted(enabled)
|
saltstack/salt
|
salt/modules/mac_service.py
|
list_
|
python
|
def list_(name=None, runas=None):
'''
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
'''
if name:
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
runas=runas)
|
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L298-L336
|
[
"def _get_service(name):\n '''\n Get information about a service. If the service is not found, raise an\n error\n\n :param str name: Service label, file name, or full path\n\n :return: The service information for the service, otherwise an Error\n :rtype: dict\n '''\n services = __utils__['mac_utils.available_services']()\n name = name.lower()\n\n service = _name_in_services(name, services)\n\n # if we would the service we can return it\n if service:\n return service\n\n # if we got here our service is not available, now we can check to see if\n # we received a cached batch of services, if not we did a fresh check\n # so we need to raise that the service could not be found.\n try:\n if not __context__['using_cached_services']:\n raise CommandExecutionError('Service not found: {0}'.format(name))\n except KeyError:\n pass\n\n # we used a cached version to check, a service could have been made\n # between now and then, we should refresh our available services.\n services = __utils__['mac_utils.available_services'](refresh=True)\n\n # check to see if we found the service we are looking for.\n service = _name_in_services(name, services)\n\n if not service:\n # Could not find the service after refresh raise.\n raise CommandExecutionError('Service not found: {0}'.format(name))\n\n # found it :)\n return service\n",
"def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n",
"def _launch_agent(name):\n '''\n Checks to see if the provided service is a LaunchAgent\n\n :param str name: Service label, file name, or full path\n\n :return: True if a LaunchAgent, False if not.\n\n :rtype: bool\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get the path to the service.\n path = _get_service(name)['file_path']\n\n if 'LaunchAgents' not in path:\n return False\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _name_in_services(name, services):
    '''
    Checks to see if the given service is in the given services.

    :param str name: Service label, file name, or full path

    :param dict services: The currently available services.

    :return: The service information for the service, otherwise
        an empty dictionary

    :rtype: dict
    '''
    # Fast path: the name is an exact service label
    if name in services:
        return services[name]

    # Otherwise try to match on the full plist path or its basename
    for info in six.itervalues(services):
        if name == info['file_path'].lower():
            return info
        stem = os.path.splitext(info['file_name'])[0]
        if name == stem.lower():
            return info

    # Nothing matched
    return {}
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error

    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found, even after
        refreshing the cached list of available services
    '''
    services = __utils__['mac_utils.available_services']()
    # matching is case-insensitive throughout
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    # (__context__ may not have the key yet, hence the KeyError guard)
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service after refresh raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
'''
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# get all the info from the launchctl service
service_info = show(name)
# get the value for the KeepAlive key in service plist
try:
keep_alive = service_info['plist']['KeepAlive']
except KeyError:
return False
# check if KeepAlive is True and not just set.
if isinstance(keep_alive, dict):
# check for pathstate
for _file, value in six.iteritems(keep_alive.get('PathState', {})):
if value is True and os.path.exists(_file):
return True
elif value is False and not os.path.exists(_file):
return True
if keep_alive is True:
return True
return False
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
def _launch_agent(name):
    '''
    Checks to see if the provided service is a LaunchAgent

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.

    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # LaunchAgents live under a '.../LaunchAgents/...' path; anything else
    # (LaunchDaemons) is treated as a daemon. Return the membership test
    # directly instead of the redundant if/return-False/return-True dance.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
'''
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
'''
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
def status(name, sig=None, runas=None):
'''
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
'''
# Find service with ps
if sig:
return __salt__['status.pid'](sig)
try:
_get_service(name)
except CommandExecutionError as msg:
log.error(msg)
return ''
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
# of this module does
pids = ''
for line in output.splitlines():
if 'PID' in line:
continue
if re.search(name, line.split()[-1]):
if line.split()[0].isdigit():
if pids:
pids += '\n'
pids += line.split()[0]
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name) and not pids:
return 'loaded'
return pids
def available(name):
'''
Check that the given service is available.
:param str name: The name of the service
:return: True if the service is available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.available com.openssh.sshd
'''
try:
_get_service(name)
return True
except CommandExecutionError:
return False
def missing(name):
'''
The inverse of service.available
Check that the given service is not available.
:param str name: The name of the service
:return: True if the service is not available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.missing com.openssh.sshd
'''
return not available(name)
def enabled(name, runas=None):
'''
Check if the specified service is enabled
:param str name: The name of the service to look up
:param str runas: User to run launchctl commands
:return: True if the specified service enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enabled org.cups.cupsd
'''
# Try to list the service. If it can't be listed, it's not enabled
try:
list_(name=name, runas=runas)
return True
except CommandExecutionError:
return False
def disabled(name, runas=None, domain='system'):
'''
Check if the specified service is not enabled. This is the opposite of
``service.enabled``
:param str name: The name to look up
:param str runas: User to run launchctl commands
:param str domain: domain to check for disabled services. Default is system.
:return: True if the specified service is NOT enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disabled org.cups.cupsd
'''
disabled = launchctl('print-disabled',
domain,
return_stdout=True,
runas=runas)
for service in disabled.split("\n"):
if name in service:
srv_name = service.split("=>")[0].split("\"")[1]
status = service.split("=>")[1]
if name != srv_name:
pass
else:
return True if 'true' in status.lower() else False
return False
def get_all(runas=None):
'''
Return a list of services that are enabled or available. Can be used to
find the name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services available or enabled
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
# Get list of enabled services
enabled = get_enabled(runas=runas)
# Get list of all services
available = list(__utils__['mac_utils.available_services']().keys())
# Return composite list
return sorted(set(enabled + available))
def get_enabled(runas=None):
'''
Return a list of all services that are enabled. Can be used to find the
name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services enabled on the system
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
# Collect list of enabled services
stdout = list_(runas=runas)
service_lines = [line for line in stdout.splitlines()]
# Construct list of enabled services
enabled = []
for line in service_lines:
# Skip header line
if line.startswith('PID'):
continue
pid, status, label = line.split('\t')
enabled.append(label)
return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
enable
|
python
|
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
|
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L339-L360
|
[
"def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n",
"def _get_domain_target(name, service_target=False):\n '''\n Returns the domain/service target and path for a service. This is used to\n determine whether or not a service should be loaded in a user space or\n system space.\n\n :param str name: Service label, file name, or full path\n\n :param bool service_target: Whether to return a full\n service target. This is needed for the enable and disable\n subcommands of /bin/launchctl. Defaults to False\n\n :return: Tuple of the domain/service target and the path to the service.\n\n :rtype: tuple\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get service information\n service = _get_service(name)\n\n # get the path to the service\n path = service['file_path']\n\n # most of the time we'll be at the system level.\n domain_target = 'system'\n\n # check if a LaunchAgent as we should treat these differently.\n if 'LaunchAgents' in path:\n # Get the console user so we can service in the correct session\n uid = __utils__['mac_utils.console_user']()\n domain_target = 'gui/{}'.format(uid)\n\n # check to see if we need to make it a full service target.\n if service_target is True:\n domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])\n\n return (domain_target, path)\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
'''
Get information about a service. If the service is not found, raise an
error
:param str name: Service label, file name, or full path
:return: The service information for the service, otherwise an Error
:rtype: dict
'''
services = __utils__['mac_utils.available_services']()
name = name.lower()
service = _name_in_services(name, services)
# if we would the service we can return it
if service:
return service
# if we got here our service is not available, now we can check to see if
# we received a cached batch of services, if not we did a fresh check
# so we need to raise that the service could not be found.
try:
if not __context__['using_cached_services']:
raise CommandExecutionError('Service not found: {0}'.format(name))
except KeyError:
pass
# we used a cached version to check, a service could have been made
# between now and then, we should refresh our available services.
services = __utils__['mac_utils.available_services'](refresh=True)
# check to see if we found the service we are looking for.
service = _name_in_services(name, services)
if not service:
# Could not find the service after refresh raise.
raise CommandExecutionError('Service not found: {0}'.format(name))
# found it :)
return service
def _always_running_service(name):
'''
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# get all the info from the launchctl service
service_info = show(name)
# get the value for the KeepAlive key in service plist
try:
keep_alive = service_info['plist']['KeepAlive']
except KeyError:
return False
# check if KeepAlive is True and not just set.
if isinstance(keep_alive, dict):
# check for pathstate
for _file, value in six.iteritems(keep_alive.get('PathState', {})):
if value is True and os.path.exists(_file):
return True
elif value is False and not os.path.exists(_file):
return True
if keep_alive is True:
return True
return False
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
def _launch_agent(name):
'''
Checks to see if the provided service is a LaunchAgent
:param str name: Service label, file name, or full path
:return: True if a LaunchAgent, False if not.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# Get the path to the service.
path = _get_service(name)['file_path']
if 'LaunchAgents' not in path:
return False
return True
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output.

    :param str name: The name of the service to list

    :param str runas: User to run launchctl commands

    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if not name:
        # No service given: list every service (raises on failure).
        return launchctl('list',
                         return_stdout=True,
                         runas=runas)

    # Resolve the service so we can list it by its plist Label.
    label = _get_service(name)['plist']['Label']

    # LaunchAgents must be queried in a user session; when no runas was
    # supplied, fall back to the current console user.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    # Collect information on the single service (raises on failure).
    return launchctl('list',
                     label,
                     return_stdout=True,
                     runas=runas)
def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # 'disable' requires a full <service-target> (domain plus label),
    # not just the domain, hence service_target=True.
    service_target = _get_domain_target(name, service_target=True)[0]
    # disable the service: will raise an error if it fails
    return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service. Raises an error if the service fails to start.

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Resolve the domain (system vs. gui/<uid>) and the plist path, then
    # load (bootstrap) the service; launchctl() raises on failure.
    target, plist_path = _get_domain_target(name)
    return launchctl('bootstrap', target, plist_path, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service. Raises an error if the service fails to stop.

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Resolve the domain (system vs. gui/<uid>) and the plist path, then
    # unload (bootout) the service; launchctl() raises on failure.
    target, plist_path = _get_domain_target(name)
    return launchctl('bootout', target, plist_path, runas=runas)
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service. Raises an error if the service
    fails to reload.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Only stop the service when it is currently enabled. Pass runas through
    # so the enabled check runs in the same context as stop/start below
    # (previously runas was dropped here, which could misreport LaunchAgents
    # when an explicit runas user was supplied).
    if enabled(name, runas=runas):
        stop(name, runas=runas)
    start(name, runas=runas)
    return True
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead. Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)

    # Bail out early with an empty status when the service is unknown.
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''

    # LaunchAgents must be queried in the console user's session.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    output = list_(runas=runas)

    # Used a string here instead of a list because that's what the linux
    # version of this module does.
    pids = ''
    for line in output.splitlines():
        if 'PID' in line:
            # Skip the header line.
            continue
        fields = line.split()
        if not fields:
            # Guard against blank lines in the launchctl output, which
            # previously raised an IndexError below.
            continue
        # Last column is the label; first column is the PID (or '-' when the
        # service is loaded but not running).
        if re.search(name, fields[-1]) and fields[0].isdigit():
            if pids:
                pids += '\n'
            pids += fields[0]

    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always be active with a PID. This
    # returns the string 'loaded' if the service shouldn't always be running
    # and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'

    return pids
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _get_service raises CommandExecutionError for unknown services;
    # translate that into a boolean.
    try:
        _get_service(name)
    except CommandExecutionError:
        return False
    return True
def missing(name):
    '''
    The inverse of service.available: check that the given service is not
    available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check.
    return not available(name)
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled.

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that can be listed is considered enabled; list_ raises
    # CommandExecutionError otherwise.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``.

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # launchctl prints one '"<label>" => true|false' line per service.
    disabled = launchctl('print-disabled',
                         domain,
                         return_stdout=True,
                         runas=runas)
    for service in disabled.split("\n"):
        # Skip lines that don't mention the service or don't have the
        # expected '"label" => state' shape (previously an IndexError).
        if name not in service or "=>" not in service:
            continue
        label_part, _, state = service.partition("=>")
        quoted = label_part.split("\"")
        if len(quoted) < 2:
            continue
        # Only an exact label match counts; substring hits are skipped.
        if quoted[1] == name:
            return 'true' in state.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Get list of enabled services
    enabled = get_enabled(runas=runas)
    # Get list of all services known to mac_utils (keyed by label)
    available = list(__utils__['mac_utils.available_services']().keys())
    # Return the de-duplicated, sorted union of both lists
    return sorted(set(enabled + available))
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # Collect the raw 'launchctl list' output.
    stdout = list_(runas=runas)
    # Construct list of enabled services from the tab-separated
    # 'PID<TAB>Status<TAB>Label' rows.
    enabled = []
    for line in stdout.splitlines():
        # Skip header line
        if line.startswith('PID'):
            continue
        parts = line.split('\t')
        if len(parts) != 3:
            # Guard against blank or unexpected lines, which previously
            # raised a ValueError on tuple unpacking.
            continue
        enabled.append(parts[2])
    return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
disable
|
python
|
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
|
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L363-L385
|
[
"def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n",
"def _get_domain_target(name, service_target=False):\n '''\n Returns the domain/service target and path for a service. This is used to\n determine whether or not a service should be loaded in a user space or\n system space.\n\n :param str name: Service label, file name, or full path\n\n :param bool service_target: Whether to return a full\n service target. This is needed for the enable and disable\n subcommands of /bin/launchctl. Defaults to False\n\n :return: Tuple of the domain/service target and the path to the service.\n\n :rtype: tuple\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get service information\n service = _get_service(name)\n\n # get the path to the service\n path = service['file_path']\n\n # most of the time we'll be at the system level.\n domain_target = 'system'\n\n # check if a LaunchAgent as we should treat these differently.\n if 'LaunchAgents' in path:\n # Get the console user so we can service in the correct session\n uid = __utils__['mac_utils.console_user']()\n domain_target = 'gui/{}'.format(uid)\n\n # check to see if we need to make it a full service target.\n if service_target is True:\n domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])\n\n return (domain_target, path)\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on macOS 10.11+ systems that provide the ``launchctl`` and
    ``plutil`` binaries.
    '''
    # Platform gate: this module is macOS-only.
    if not salt.utils.platform.is_darwin():
        return (False, 'Failed to load the mac_service module:\n'
                       'Only available on macOS systems.')
    # Required binaries must be on PATH.
    if not salt.utils.path.which('launchctl'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "launchctl"')
    if not salt.utils.path.which('plutil'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "plutil"')
    # The launchctl subcommands used here need macOS 10.11 or newer.
    if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Requires macOS 10.11 or newer')
    return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error.

    :param str name: Service label, file name, or full path

    :return: The service information for the service
    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found, even
        after refreshing the cached service list
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups are case-insensitive; presumably available_services stores
    # lower-cased keys/paths — confirm against mac_utils.
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # no cache flag recorded yet; fall through and refresh to be safe
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service even after the refresh: raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False
        or not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # get all the info from the launchctl service
    service_info = show(name)
    # get the value for the KeepAlive key in service plist
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all: the service is not always-running.
        return False
    # KeepAlive may be a dict of conditions rather than a plain bool.
    if isinstance(keep_alive, dict):
        # PathState maps file paths to booleans: keep the service alive while
        # the path exists (True) or while it is absent (False).
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True
    # A plain boolean KeepAlive must be exactly True (not just truthy/set).
    if keep_alive is True:
        return True
    return False
def _get_domain_target(name, service_target=False):
    '''
    Returns the domain/service target and path for a service. This is used to
    determine whether or not a service should be loaded in a user space or
    system space.

    :param str name: Service label, file name, or full path

    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False

    :return: Tuple of the domain/service target and the path to the service.
    :rtype: tuple

    .. versionadded:: 2019.2.0
    '''
    # Get service information
    service = _get_service(name)
    # get the path to the service plist
    path = service['file_path']
    # most of the time we'll be at the system level.
    domain_target = 'system'
    # LaunchAgents run in the logged-in user's GUI session rather than the
    # system domain.
    if 'LaunchAgents' in path:
        # Get the console user so we can address the correct session
        uid = __utils__['mac_utils.console_user']()
        domain_target = 'gui/{}'.format(uid)
    # enable/disable need a full <service-target> of the form
    # <domain>/<label>.
    if service_target is True:
        domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
    return (domain_target, path)
def _launch_agent(name):
    '''
    Check to see if the provided service is a LaunchAgent.

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # Get the path to the service plist.
    path = _get_service(name)['file_path']
    # Agents live under a 'LaunchAgents' directory; daemons do not.
    if 'LaunchAgents' not in path:
        return False
    return True
def show(name):
    '''
    Show properties of a launchctl service.

    :param str name: Service label, file name, or full path

    :return: The service information if the service is found
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' service.show org.cups.cupsd  # service label
        salt '*' service.show org.cups.cupsd.plist  # file name
        salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist  # full path
    '''
    # Thin wrapper: _get_service performs the lookup and raises
    # CommandExecutionError when the service cannot be found.
    return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
    '''
    Run a launchctl command and raise an error if it fails.

    :param str sub_cmd: Sub command supplied to launchctl

    :param tuple args: Tuple containing additional arguments to pass to
        launchctl

    :param dict kwargs: Dictionary containing arguments to pass to
        ``cmd.run_all``

    :param bool return_stdout: A keyword argument. If true return the stdout
        of the launchctl command

    :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
        the stdout of the launchctl command if requested
    :rtype: bool, str

    CLI Example:

    .. code-block:: bash

        salt '*' service.launchctl debug org.cups.cupsd
    '''
    # All argument handling (runas, return_stdout, error raising) is
    # delegated to the shared mac_utils helper.
    return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output.

    :param str name: The name of the service to list

    :param str runas: User to run launchctl commands

    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if name:
        # Resolve the service so we can list it by its plist Label
        service = _get_service(name)
        label = service['plist']['Label']
        # we can assume if we are trying to list a LaunchAgent we need
        # to run as a user; if not provided, we'll use the console user.
        if not runas and _launch_agent(name):
            runas = __utils__['mac_utils.console_user'](username=True)
        # Collect information on service: will raise an error if it fails
        return launchctl('list',
                         label,
                         return_stdout=True,
                         runas=runas)
    # Collect information on all services: will raise an error if it fails
    return launchctl('list',
                     return_stdout=True,
                     runas=runas)
def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be
    enabled.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # 'enable' requires a full <service-target> (domain plus label),
    # not just the domain, hence service_target=True.
    service_target = _get_domain_target(name, service_target=True)[0]
    # Enable the service: will raise an error if it fails
    return launchctl('enable', service_target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service. Raises an error if the service fails to start.

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Resolve the domain (system vs. gui/<uid>) and the plist path.
    domain_target, path = _get_domain_target(name)
    # Load (bootstrap) the service: will raise an error if it fails
    return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service. Raises an error if the service fails to stop.

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Resolve the domain (system vs. gui/<uid>) and the plist path.
    domain_target, path = _get_domain_target(name)
    # Stop (bootout) the service: will raise an error if it fails
    return launchctl('bootout', domain_target, path, runas=runas)
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service. Raises an error if the service
    fails to reload.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Only stop the service when it is currently enabled. Pass runas through
    # so the enabled check runs in the same context as stop/start below
    # (previously runas was dropped here, which could misreport LaunchAgents
    # when an explicit runas user was supplied).
    if enabled(name, runas=runas):
        stop(name, runas=runas)
    start(name, runas=runas)
    return True
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead. Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)

    # Bail out early with an empty status when the service is unknown.
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''

    # LaunchAgents must be queried in the console user's session.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    output = list_(runas=runas)

    # Used a string here instead of a list because that's what the linux
    # version of this module does.
    pids = ''
    for line in output.splitlines():
        if 'PID' in line:
            # Skip the header line.
            continue
        fields = line.split()
        if not fields:
            # Guard against blank lines in the launchctl output, which
            # previously raised an IndexError below.
            continue
        # Last column is the label; first column is the PID (or '-' when the
        # service is loaded but not running).
        if re.search(name, fields[-1]) and fields[0].isdigit():
            if pids:
                pids += '\n'
            pids += fields[0]

    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always be active with a PID. This
    # returns the string 'loaded' if the service shouldn't always be running
    # and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'

    return pids
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _get_service raises CommandExecutionError for unknown services;
    # translate that into a boolean.
    try:
        _get_service(name)
        return True
    except CommandExecutionError:
        return False
def missing(name):
    '''
    The inverse of service.available: check that the given service is not
    available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check.
    return not available(name)
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled.

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that can be listed is considered enabled; list_ raises
    # CommandExecutionError otherwise.
    try:
        list_(name=name, runas=runas)
        return True
    except CommandExecutionError:
        return False
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``.

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # launchctl prints one '"<label>" => true|false' line per service.
    disabled = launchctl('print-disabled',
                         domain,
                         return_stdout=True,
                         runas=runas)
    for service in disabled.split("\n"):
        # Skip lines that don't mention the service or don't have the
        # expected '"label" => state' shape (previously an IndexError).
        if name not in service or "=>" not in service:
            continue
        label_part, _, state = service.partition("=>")
        quoted = label_part.split("\"")
        if len(quoted) < 2:
            continue
        # Only an exact label match counts; substring hits are skipped.
        if quoted[1] == name:
            return 'true' in state.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Get list of enabled services
    enabled = get_enabled(runas=runas)
    # Get list of all services known to mac_utils (keyed by label)
    available = list(__utils__['mac_utils.available_services']().keys())
    # Return the de-duplicated, sorted union of both lists
    return sorted(set(enabled + available))
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # Collect the raw 'launchctl list' output.
    stdout = list_(runas=runas)
    # Construct list of enabled services from the tab-separated
    # 'PID<TAB>Status<TAB>Label' rows.
    enabled = []
    for line in stdout.splitlines():
        # Skip header line
        if line.startswith('PID'):
            continue
        parts = line.split('\t')
        if len(parts) != 3:
            # Guard against blank or unexpected lines, which previously
            # raised a ValueError on tuple unpacking.
            continue
        enabled.append(parts[2])
    return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
start
|
python
|
def start(name, runas=None):
'''
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Load (bootstrap) the service: will raise an error if it fails
return launchctl('bootstrap', domain_target, path, runas=runas)
|
Start a launchd service. Raises an error if the service fails to start
.. note::
To start a service in macOS the service must be enabled first. Use
``service.enable`` to enable the service.
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already running
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.start org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L388-L413
|
[
"def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n",
"def _get_domain_target(name, service_target=False):\n '''\n Returns the domain/service target and path for a service. This is used to\n determine whether or not a service should be loaded in a user space or\n system space.\n\n :param str name: Service label, file name, or full path\n\n :param bool service_target: Whether to return a full\n service target. This is needed for the enable and disable\n subcommands of /bin/launchctl. Defaults to False\n\n :return: Tuple of the domain/service target and the path to the service.\n\n :rtype: tuple\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get service information\n service = _get_service(name)\n\n # get the path to the service\n path = service['file_path']\n\n # most of the time we'll be at the system level.\n domain_target = 'system'\n\n # check if a LaunchAgent as we should treat these differently.\n if 'LaunchAgents' in path:\n # Get the console user so we can service in the correct session\n uid = __utils__['mac_utils.console_user']()\n domain_target = 'gui/{}'.format(uid)\n\n # check to see if we need to make it a full service target.\n if service_target is True:\n domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])\n\n return (domain_target, path)\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on macOS 10.11+ systems that provide the ``launchctl`` and
    ``plutil`` binaries.
    '''
    # Platform gate: this module is macOS-only.
    if not salt.utils.platform.is_darwin():
        return (False, 'Failed to load the mac_service module:\n'
                       'Only available on macOS systems.')
    # Required binaries must be on PATH.
    if not salt.utils.path.which('launchctl'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "launchctl"')
    if not salt.utils.path.which('plutil'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "plutil"')
    # The launchctl subcommands used here need macOS 10.11 or newer.
    if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Requires macOS 10.11 or newer')
    return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error

    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached service list.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups are case-insensitive; compare everything lower-cased.
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # Flag not present in __context__: fall through and refresh below.
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service even after the refresh: raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path

    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.

    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # get all the info from the launchctl service
    service_info = show(name)
    # get the value for the KeepAlive key in service plist
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all: the job is not kept alive.
        return False
    # check if KeepAlive is True and not just set.
    if isinstance(keep_alive, dict):
        # check for pathstate
        # PathState maps file paths to booleans; this treats "value True and
        # path exists" or "value False and path missing" as keep-alive.
        # NOTE(review): semantics assumed from launchd conventions -- confirm
        # against Apple's launchd.plist(5) man page.
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True
    if keep_alive is True:
        return True
    return False
def _get_domain_target(name, service_target=False):
    '''
    Resolve the launchctl domain target (and plist path) for a service,
    deciding whether it lives in the system domain or in a user's GUI
    session.

    :param str name: Service label, file name, or full path

    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False

    :return: Tuple of the domain/service target and the path to the service.

    :rtype: tuple

    .. versionadded:: 2019.2.0
    '''
    service = _get_service(name)
    plist_path = service['file_path']

    if 'LaunchAgents' in plist_path:
        # LaunchAgents run in the console user's GUI session.
        uid = __utils__['mac_utils.console_user']()
        domain = 'gui/{}'.format(uid)
    else:
        # Everything else is managed at the system level.
        domain = 'system'

    if service_target is True:
        # enable/disable need the full <domain>/<label> service target.
        domain = '{}/{}'.format(domain, service['plist']['Label'])

    return (domain, plist_path)
def _launch_agent(name):
    '''
    Report whether the named service is installed as a LaunchAgent.

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.

    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # A service is a LaunchAgent exactly when its plist lives under a
    # LaunchAgents directory.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
    '''
    Show properties of a launchctl service

    :param str name: Service label, file name, or full path

    :return: The service information if the service is found
    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found.

    CLI Example:

    .. code-block:: bash

        salt '*' service.show org.cups.cupsd  # service label
        salt '*' service.show org.cups.cupsd.plist  # file name
        salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist  # full path
    '''
    # Thin public wrapper: _get_service does the lookup and error handling.
    return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
    '''
    Run a launchctl command and raise an error if it fails

    :param str sub_cmd: Sub command supplied to launchctl

    :param tuple args: Tuple containing additional arguments to pass to
        launchctl

    :param dict kwargs: Dictionary containing arguments to pass to
        ``cmd.run_all``

    :param bool return_stdout: A keyword argument. If true return the stdout
        of the launchctl command

    :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
        the stdout of the launchctl command if requested
    :rtype: bool, str

    CLI Example:

    .. code-block:: bash

        salt '*' service.launchctl debug org.cups.cupsd
    '''
    # Delegate entirely to the shared mac_utils helper so all launchctl
    # invocations (and their error handling) go through one code path.
    return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output

    :param str name: The name of the service to list

    :param str runas: User to run launchctl commands

    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if name is None:
        # No service requested: dump the whole launchctl job table.
        # Raises an error if the command fails.
        return launchctl('list', return_stdout=True, runas=runas)

    # Resolve the service so we can list it by its plist label.
    label = _get_service(name)['plist']['Label']

    # Listing a LaunchAgent has to happen in a user session; default to the
    # console user when no runas was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)

    # Raises an error if the command fails.
    return launchctl('list',
                     label,
                     return_stdout=True,
                     runas=runas)
def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be enabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # 'launchctl enable' needs a full <domain>/<label> service target.
    target = _get_domain_target(name, service_target=True)[0]

    # Raises an error if enabling fails.
    return launchctl('enable', target, runas=runas)
def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # 'launchctl disable' needs a full <domain>/<label> service target.
    target = _get_domain_target(name, service_target=True)[0]

    # Raises an error if disabling fails.
    return launchctl('disable', target, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service. Raises an error if the service fails to stop

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Resolve where the job lives (system vs gui/<uid>) and its plist path.
    domain, plist_path = _get_domain_target(name)

    # 'bootout' unloads the job from its domain; raises on failure.
    return launchctl('bootout', domain, plist_path, runas=runas)
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service. Raises an error if the service
    fails to reload

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Only stop the service first when it is currently enabled (loaded);
    # a disabled service is simply (re)started. Raises on failure.
    if enabled(name):
        stop(name, runas=runas)
    start(name, runas=runas)

    return True
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.

    :param str sig: Find the service with status.pid instead. Note that
        ``name`` must still be provided.

    :param str runas: User to run launchctl commands

    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string

    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)
    # An unknown service is logged and reported as not running rather than
    # raising to the caller.
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''
    # LaunchAgents must be queried in a user session; default to the
    # console user when no runas was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)
    output = list_(runas=runas)
    # Used a string here instead of a list because that's what the linux version
    # of this module does
    pids = ''
    for line in output.splitlines():
        # Skip the launchctl header row.
        if 'PID' in line:
            continue
        # Match 'name' (treated as a regex) against the label column; only
        # rows whose first column is a numeric PID are collected.
        if re.search(name, line.split()[-1]):
            if line.split()[0].isdigit():
                if pids:
                    pids += '\n'
                pids += line.split()[0]
    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always active with a PID. This will
    # return a string 'loaded' if it shouldn't always be running and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'
    return pids
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _get_service raises CommandExecutionError for unknown services;
    # translate that into a boolean.
    try:
        _get_service(name)
    except CommandExecutionError:
        return False
    return True
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simple negation of service.available.
    return not available(name)
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # launchctl can only list loaded services, so a successful list means
    # the service is enabled and a failure means it is not.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    return True
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    disabled_output = launchctl('print-disabled',
                                domain,
                                return_stdout=True,
                                runas=runas)
    for line in disabled_output.split("\n"):
        # Only lines shaped like '"<label>" => true/false' are parseable.
        # Skipping anything else also avoids an IndexError when a matching
        # line lacks the '=>' separator (e.g. surrounding braces).
        if name not in line or "=>" not in line:
            continue
        srv_name = line.split("=>")[0].split("\"")[1]
        status = line.split("=>")[1]
        if name == srv_name:
            # The service is disabled when launchctl reports 'true'.
            return 'true' in status.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Union of currently-enabled labels and everything installed on disk,
    # returned sorted and de-duplicated.
    combined = set(get_enabled(runas=runas))
    combined.update(__utils__['mac_utils.available_services']().keys())
    return sorted(combined)
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    labels = set()
    for line in list_(runas=runas).splitlines():
        # Skip the launchctl header row.
        if line.startswith('PID'):
            continue
        # Columns are tab-separated: PID, status, label.
        _pid, _status, label = line.split('\t')
        labels.add(label)
    return sorted(labels)
|
saltstack/salt
|
salt/modules/mac_service.py
|
stop
|
python
|
def stop(name, runas=None):
'''
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
'''
# Get the domain target.
domain_target, path = _get_domain_target(name)
# Stop (bootout) the service: will raise an error if it fails
return launchctl('bootout', domain_target, path, runas=runas)
|
Stop a launchd service. Raises an error if the service fails to stop
.. note::
Though ``service.stop`` will unload a service in macOS, the service
will start on next boot unless it is disabled. Use ``service.disable``
to disable the service
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already stopped
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.stop org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L416-L442
|
[
"def launchctl(sub_cmd, *args, **kwargs):\n '''\n Run a launchctl command and raise an error if it fails\n\n :param str sub_cmd: Sub command supplied to launchctl\n\n :param tuple args: Tuple containing additional arguments to pass to\n launchctl\n\n :param dict kwargs: Dictionary containing arguments to pass to\n ``cmd.run_all``\n\n :param bool return_stdout: A keyword argument. If true return the stdout\n of the launchctl command\n\n :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or\n the stdout of the launchctl command if requested\n :rtype: bool, str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.launchctl debug org.cups.cupsd\n '''\n return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)\n",
"def _get_domain_target(name, service_target=False):\n '''\n Returns the domain/service target and path for a service. This is used to\n determine whether or not a service should be loaded in a user space or\n system space.\n\n :param str name: Service label, file name, or full path\n\n :param bool service_target: Whether to return a full\n service target. This is needed for the enable and disable\n subcommands of /bin/launchctl. Defaults to False\n\n :return: Tuple of the domain/service target and the path to the service.\n\n :rtype: tuple\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get service information\n service = _get_service(name)\n\n # get the path to the service\n path = service['file_path']\n\n # most of the time we'll be at the system level.\n domain_target = 'system'\n\n # check if a LaunchAgent as we should treat these differently.\n if 'LaunchAgents' in path:\n # Get the console user so we can service in the correct session\n uid = __utils__['mac_utils.console_user']()\n domain_target = 'gui/{}'.format(uid)\n\n # check to see if we need to make it a full service target.\n if service_target is True:\n domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])\n\n return (domain_target, path)\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only for macOS with launchctl
'''
if not salt.utils.platform.is_darwin():
return (False, 'Failed to load the mac_service module:\n'
'Only available on macOS systems.')
if not salt.utils.path.which('launchctl'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "launchctl"')
if not salt.utils.path.which('plutil'):
return (False, 'Failed to load the mac_service module:\n'
'Required binary not found: "plutil"')
if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
return (False, 'Failed to load the mac_service module:\n'
'Requires macOS 10.11 or newer')
return __virtualname__
def _name_in_services(name, services):
'''
Checks to see if the given service is in the given services.
:param str name: Service label, file name, or full path
:param dict services: The currently available services.
:return: The service information for the service, otherwise
an empty dictionary
:rtype: dict
'''
if name in services:
# Match on label
return services[name]
for service in six.itervalues(services):
if service['file_path'].lower() == name:
# Match on full path
return service
basename, ext = os.path.splitext(service['file_name'])
if basename.lower() == name:
# Match on basename
return service
return dict()
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path

    :return: The service information for the service, otherwise an Error

    :rtype: dict

    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached service list.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups are case-insensitive; compare everything lower-cased.
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # Flag not present in __context__: fall through and refresh below.
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service even after the refresh: raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
'''
Check if the service should always be running based on the KeepAlive Key
in the service plist.
:param str name: Service label, file name, or full path
:return: True if the KeepAlive key is set to True, False if set to False or
not set in the plist at all.
:rtype: bool
.. versionadded:: 2019.2.0
'''
# get all the info from the launchctl service
service_info = show(name)
# get the value for the KeepAlive key in service plist
try:
keep_alive = service_info['plist']['KeepAlive']
except KeyError:
return False
# check if KeepAlive is True and not just set.
if isinstance(keep_alive, dict):
# check for pathstate
for _file, value in six.iteritems(keep_alive.get('PathState', {})):
if value is True and os.path.exists(_file):
return True
elif value is False and not os.path.exists(_file):
return True
if keep_alive is True:
return True
return False
def _get_domain_target(name, service_target=False):
'''
Returns the domain/service target and path for a service. This is used to
determine whether or not a service should be loaded in a user space or
system space.
:param str name: Service label, file name, or full path
:param bool service_target: Whether to return a full
service target. This is needed for the enable and disable
subcommands of /bin/launchctl. Defaults to False
:return: Tuple of the domain/service target and the path to the service.
:rtype: tuple
.. versionadded:: 2019.2.0
'''
# Get service information
service = _get_service(name)
# get the path to the service
path = service['file_path']
# most of the time we'll be at the system level.
domain_target = 'system'
# check if a LaunchAgent as we should treat these differently.
if 'LaunchAgents' in path:
# Get the console user so we can service in the correct session
uid = __utils__['mac_utils.console_user']()
domain_target = 'gui/{}'.format(uid)
# check to see if we need to make it a full service target.
if service_target is True:
domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
return (domain_target, path)
def _launch_agent(name):
    '''
    Report whether the named service is installed as a LaunchAgent.

    :param str name: Service label, file name, or full path

    :return: True if a LaunchAgent, False if not.

    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # A service is a LaunchAgent exactly when its plist lives under a
    # LaunchAgents directory.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
'''
Show properties of a launchctl service
:param str name: Service label, file name, or full path
:return: The service information if the service is found
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' service.show org.cups.cupsd # service label
salt '*' service.show org.cups.cupsd.plist # file name
salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist # full path
'''
return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
'''
Run a launchctl command and raise an error if it fails
:param str sub_cmd: Sub command supplied to launchctl
:param tuple args: Tuple containing additional arguments to pass to
launchctl
:param dict kwargs: Dictionary containing arguments to pass to
``cmd.run_all``
:param bool return_stdout: A keyword argument. If true return the stdout
of the launchctl command
:return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
the stdout of the launchctl command if requested
:rtype: bool, str
CLI Example:
.. code-block:: bash
salt '*' service.launchctl debug org.cups.cupsd
'''
return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
'''
Run launchctl list and return the output
:param str name: The name of the service to list
:param str runas: User to run launchctl commands
:return: If a name is passed returns information about the named service,
otherwise returns a list of all services and pids
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.list
salt '*' service.list org.cups.cupsd
'''
if name:
# Get service information and label
service = _get_service(name)
label = service['plist']['Label']
# we can assume if we are trying to list a LaunchAgent we need
# to run as a user, if not provided, we'll use the console user.
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
# Collect information on service: will raise an error if it fails
return launchctl('list',
label,
return_stdout=True,
runas=runas)
# Collect information on all services: will raise an error if it fails
return launchctl('list',
return_stdout=True,
runas=runas)
def enable(name, runas=None):
'''
Enable a launchd service. Raises an error if the service fails to be enabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already enabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enable org.cups.cupsd
'''
# Get the domain target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# Enable the service: will raise an error if it fails
return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
'''
Disable a launchd service. Raises an error if the service fails to be
disabled
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful or if the service is already disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.disable org.cups.cupsd
'''
# Get the service target. enable requires a full <service-target>
service_target = _get_domain_target(name, service_target=True)[0]
# disable the service: will raise an error if it fails
return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service. Raises an error if the service fails to start

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Resolve where the job lives (system vs gui/<uid>) and its plist path.
    domain, plist_path = _get_domain_target(name)

    # 'bootstrap' loads the job into its domain; raises on failure.
    return launchctl('bootstrap', domain, plist_path, runas=runas)
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
def status(name, sig=None, runas=None):
'''
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
'''
# Find service with ps
if sig:
return __salt__['status.pid'](sig)
try:
_get_service(name)
except CommandExecutionError as msg:
log.error(msg)
return ''
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
# of this module does
pids = ''
for line in output.splitlines():
if 'PID' in line:
continue
if re.search(name, line.split()[-1]):
if line.split()[0].isdigit():
if pids:
pids += '\n'
pids += line.split()[0]
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name) and not pids:
return 'loaded'
return pids
def available(name):
'''
Check that the given service is available.
:param str name: The name of the service
:return: True if the service is available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.available com.openssh.sshd
'''
try:
_get_service(name)
return True
except CommandExecutionError:
return False
def missing(name):
'''
The inverse of service.available
Check that the given service is not available.
:param str name: The name of the service
:return: True if the service is not available, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.missing com.openssh.sshd
'''
return not available(name)
def enabled(name, runas=None):
'''
Check if the specified service is enabled
:param str name: The name of the service to look up
:param str runas: User to run launchctl commands
:return: True if the specified service enabled, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.enabled org.cups.cupsd
'''
# Try to list the service. If it can't be listed, it's not enabled
try:
list_(name=name, runas=runas)
return True
except CommandExecutionError:
return False
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    disabled_output = launchctl('print-disabled',
                                domain,
                                return_stdout=True,
                                runas=runas)
    for line in disabled_output.split("\n"):
        # Only lines shaped like '"<label>" => true/false' are parseable.
        # Skipping anything else also avoids an IndexError when a matching
        # line lacks the '=>' separator (e.g. surrounding braces).
        if name not in line or "=>" not in line:
            continue
        srv_name = line.split("=>")[0].split("\"")[1]
        status = line.split("=>")[1]
        if name == srv_name:
            # The service is disabled when launchctl reports 'true'.
            return 'true' in status.lower()
    return False
def get_all(runas=None):
'''
Return a list of services that are enabled or available. Can be used to
find the name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services available or enabled
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
# Get list of enabled services
enabled = get_enabled(runas=runas)
# Get list of all services
available = list(__utils__['mac_utils.available_services']().keys())
# Return composite list
return sorted(set(enabled + available))
def get_enabled(runas=None):
'''
Return a list of all services that are enabled. Can be used to find the
name of a service.
:param str runas: User to run launchctl commands
:return: A list of all the services enabled on the system
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
# Collect list of enabled services
stdout = list_(runas=runas)
service_lines = [line for line in stdout.splitlines()]
# Construct list of enabled services
enabled = []
for line in service_lines:
# Skip header line
if line.startswith('PID'):
continue
pid, status, label = line.split('\t')
enabled.append(label)
return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
restart
|
python
|
def restart(name, runas=None):
'''
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
'''
# Restart the service: will raise an error if it fails
if enabled(name):
stop(name, runas=runas)
start(name, runas=runas)
return True
|
Unloads and reloads a launchd service. Raises an error if the service
fails to reload
:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' service.restart org.cups.cupsd
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L445-L468
|
[
"def start(name, runas=None):\n '''\n Start a launchd service. Raises an error if the service fails to start\n\n .. note::\n To start a service in macOS the service must be enabled first. Use\n ``service.enable`` to enable the service.\n\n :param str name: Service label, file name, or full path\n\n :param str runas: User to run launchctl commands\n\n :return: ``True`` if successful or if the service is already running\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.start org.cups.cupsd\n '''\n # Get the domain target.\n domain_target, path = _get_domain_target(name)\n\n # Load (bootstrap) the service: will raise an error if it fails\n return launchctl('bootstrap', domain_target, path, runas=runas)\n",
"def stop(name, runas=None):\n '''\n Stop a launchd service. Raises an error if the service fails to stop\n\n .. note::\n Though ``service.stop`` will unload a service in macOS, the service\n will start on next boot unless it is disabled. Use ``service.disable``\n to disable the service\n\n :param str name: Service label, file name, or full path\n\n :param str runas: User to run launchctl commands\n\n :return: ``True`` if successful or if the service is already stopped\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.stop org.cups.cupsd\n '''\n # Get the domain target.\n domain_target, path = _get_domain_target(name)\n\n # Stop (bootout) the service: will raise an error if it fails\n return launchctl('bootout', domain_target, path, runas=runas)\n",
"def enabled(name, runas=None):\n '''\n Check if the specified service is enabled\n\n :param str name: The name of the service to look up\n\n :param str runas: User to run launchctl commands\n\n :return: True if the specified service enabled, otherwise False\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.enabled org.cups.cupsd\n '''\n # Try to list the service. If it can't be listed, it's not enabled\n try:\n list_(name=name, runas=runas)\n return True\n except CommandExecutionError:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on macOS hosts that ship both ``launchctl`` and ``plutil``
    and run macOS 10.11 or newer.
    '''
    if not salt.utils.platform.is_darwin():
        return (False, 'Failed to load the mac_service module:\n'
                       'Only available on macOS systems.')
    if not salt.utils.path.which('launchctl'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "launchctl"')
    if not salt.utils.path.which('plutil'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "plutil"')
    # Require macOS 10.11 or newer (compare release versions numerically).
    if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Requires macOS 10.11 or newer')
    return __virtualname__
def _name_in_services(name, services):
    '''
    Look up ``name`` in the ``services`` mapping.

    The name may be a service label (a dictionary key), the full path to the
    service plist, or the plist file name without its extension.

    :param str name: Service label, file name, or full path (expected to be
        lower-cased by the caller, see ``_get_service``).
    :param dict services: The currently available services.
    :return: The service information for the service, otherwise
        an empty dictionary
    :rtype: dict
    '''
    if name in services:
        # Match on label
        return services[name]
    for service in six.itervalues(services):
        if service['file_path'].lower() == name:
            # Match on full path
            return service
        basename, ext = os.path.splitext(service['file_name'])
        if basename.lower() == name:
            # Match on basename (file name without the .plist extension)
            return service
    # No match at all
    return dict()
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path
    :return: The service information for the service, otherwise an Error
    :rtype: dict
    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached service list.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups below are case-insensitive; _name_in_services expects lowercase.
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # No cache flag recorded; fall through and retry with a refresh.
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service after refresh raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path
    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # get all the info from the launchctl service
    service_info = show(name)
    # get the value for the KeepAlive key in service plist
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all -> not an always-running service.
        return False
    # check if KeepAlive is True and not just set.
    if isinstance(keep_alive, dict):
        # KeepAlive may be a dict of conditions; honor PathState entries:
        # keep running while the path exists (True) or is absent (False).
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True
    if keep_alive is True:
        return True
    return False
def _get_domain_target(name, service_target=False):
    '''
    Returns the domain/service target and path for a service. This is used to
    determine whether or not a service should be loaded in a user space or
    system space.

    :param str name: Service label, file name, or full path
    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False
    :return: Tuple of the domain/service target and the path to the service.
    :rtype: tuple

    .. versionadded:: 2019.2.0
    '''
    # Get service information
    service = _get_service(name)
    # get the path to the service
    path = service['file_path']
    # most of the time we'll be at the system level.
    domain_target = 'system'
    # check if a LaunchAgent as we should treat these differently.
    if 'LaunchAgents' in path:
        # Get the console user so we can service in the correct session
        uid = __utils__['mac_utils.console_user']()
        domain_target = 'gui/{}'.format(uid)
    # check to see if we need to make it a full service target
    # ("<domain>/<label>"), as required by ``launchctl enable/disable``.
    if service_target is True:
        domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
    return (domain_target, path)
def _launch_agent(name):
    '''
    Determine whether the given service is a LaunchAgent.

    :param str name: Service label, file name, or full path
    :return: True if a LaunchAgent, False if not.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # LaunchAgents are identified purely by the location of their plist file.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
    '''
    Show properties of a launchctl service

    :param str name: Service label, file name, or full path
    :return: The service information if the service is found
    :rtype: dict
    :raises CommandExecutionError: if the service cannot be found.

    CLI Example:

    .. code-block:: bash

        salt '*' service.show org.cups.cupsd  # service label
        salt '*' service.show org.cups.cupsd.plist  # file name
        salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist  # full path
    '''
    # Thin public wrapper around the private lookup helper.
    return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
    '''
    Run a launchctl command and raise an error if it fails

    :param str sub_cmd: Sub command supplied to launchctl
    :param tuple args: Tuple containing additional arguments to pass to
        launchctl
    :param dict kwargs: Dictionary containing arguments to pass to
        ``cmd.run_all``
    :param bool return_stdout: A keyword argument. If true return the stdout
        of the launchctl command
    :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
        the stdout of the launchctl command if requested
    :rtype: bool, str

    CLI Example:

    .. code-block:: bash

        salt '*' service.launchctl debug org.cups.cupsd
    '''
    # All the heavy lifting (spawning the binary, error handling) lives in
    # the shared mac_utils helper so other modules can reuse it.
    return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output

    :param str name: The name of the service to list
    :param str runas: User to run launchctl commands
    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str
    :raises CommandExecutionError: if the service cannot be found or the
        launchctl call fails.

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if name:
        # Get service information and label
        service = _get_service(name)
        label = service['plist']['Label']
        # we can assume if we are trying to list a LaunchAgent we need
        # to run as a user, if not provided, we'll use the console user.
        if not runas and _launch_agent(name):
            runas = __utils__['mac_utils.console_user'](username=True)
        # Collect information on service: will raise an error if it fails
        return launchctl('list',
                         label,
                         return_stdout=True,
                         runas=runas)
    # Collect information on all services: will raise an error if it fails
    return launchctl('list',
                     return_stdout=True,
                     runas=runas)
def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be enabled

    :param str name: Service label, file name, or full path
    :param str runas: User to run launchctl commands
    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # ``launchctl enable`` needs the full <service-target>, not just a domain.
    target, _ = _get_domain_target(name, service_target=True)
    # Enable the service: raises on failure.
    return launchctl('enable', target, runas=runas)
def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled

    :param str name: Service label, file name, or full path
    :param str runas: User to run launchctl commands
    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # ``launchctl disable`` needs the full <service-target>, not just a domain.
    target, _ = _get_domain_target(name, service_target=True)
    # Disable the service: raises on failure.
    return launchctl('disable', target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service. Raises an error if the service fails to start

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path
    :param str runas: User to run launchctl commands
    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Get the domain target ('system', or 'gui/<uid>' for LaunchAgents).
    domain_target, path = _get_domain_target(name)
    # Load (bootstrap) the service: will raise an error if it fails
    return launchctl('bootstrap', domain_target, path, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service. Raises an error if the service fails to stop

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service

    :param str name: Service label, file name, or full path
    :param str runas: User to run launchctl commands
    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Get the domain target ('system', or 'gui/<uid>' for LaunchAgents).
    domain_target, path = _get_domain_target(name)
    # Stop (bootout) the service: will raise an error if it fails
    return launchctl('bootout', domain_target, path, runas=runas)
def status(name, sig=None, runas=None):
    '''
    Return the status for a service.

    :param str name: Used to find the service from launchctl. Can be any part
        of the service name or a regex expression.
    :param str sig: Find the service with status.pid instead. Note that
        ``name`` must still be provided.
    :param str runas: User to run launchctl commands
    :return: The PID for the service if it is running, or 'loaded' if the
        service should not always have a PID, or otherwise an empty string
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.status cups
    '''
    # Find service with ps
    if sig:
        return __salt__['status.pid'](sig)
    try:
        _get_service(name)
    except CommandExecutionError as msg:
        log.error(msg)
        return ''
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)
    output = list_(runas=runas)
    # Collect PIDs; joined into a newline-separated string below because
    # that's what the linux version of this module returns.
    pids = []
    for line in output.splitlines():
        # Skip the header line
        if 'PID' in line:
            continue
        fields = line.split()
        # Guard against blank lines (previously IndexError on fields[-1]);
        # also split each line only once instead of three times.
        if not fields:
            continue
        if re.search(name, fields[-1]) and fields[0].isdigit():
            pids.append(fields[0])
    # mac services are a little different than other platforms as they may be
    # set to run on intervals and may not always be active with a PID. This
    # returns the string 'loaded' if it shouldn't always run and is enabled.
    if not _always_running_service(name) and enabled(name) and not pids:
        return 'loaded'
    return '\n'.join(pids)
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service
    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # A failed lookup raises CommandExecutionError; translate that into False.
    try:
        _get_service(name)
    except CommandExecutionError:
        return False
    return True
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service
    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check.
    return not available(name)
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up
    :param str runas: User to run launchctl commands
    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # Try to list the service. If it can't be listed, it's not enabled
    try:
        list_(name=name, runas=runas)
        return True
    except CommandExecutionError:
        return False
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up
    :param str runas: User to run launchctl commands
    :param str domain: domain to check for disabled services. Default is system.
    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    # ``launchctl print-disabled`` output lines look like:
    #   "<label>" => true
    output = launchctl('print-disabled',
                       domain,
                       return_stdout=True,
                       runas=runas)
    for line in output.splitlines():
        # Cheap pre-filter before doing any parsing.
        if name not in line:
            continue
        before, sep, state = line.partition('=>')
        if not sep:
            # Mentions the name but is not a '"<label>" => <state>' entry.
            continue
        quoted = before.split('"')
        # Guard against lines without a quoted label (previously IndexError),
        # and keep scanning when the label is only a partial match.
        if len(quoted) < 2 or quoted[1] != name:
            continue
        return 'true' in state.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands
    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Union of everything launchd knows about and everything currently
    # enabled, returned as a sorted, de-duplicated list.
    known = set(__utils__['mac_utils.available_services']())
    known.update(get_enabled(runas=runas))
    return sorted(known)
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands
    :return: A sorted list of unique service labels enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # Collect the raw output of ``launchctl list``
    stdout = list_(runas=runas)
    enabled = []
    for line in stdout.splitlines():
        # Skip the header line ("PID\tStatus\tLabel")
        if line.startswith('PID'):
            continue
        fields = line.split('\t')
        # Guard against blank or malformed lines, which previously raised
        # a ValueError during the 3-way tuple unpack.
        if len(fields) < 3:
            continue
        enabled.append(fields[2])
    return sorted(set(enabled))
|
saltstack/salt
|
salt/modules/mac_service.py
|
status
|
python
|
def status(name, sig=None, runas=None):
'''
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
'''
# Find service with ps
if sig:
return __salt__['status.pid'](sig)
try:
_get_service(name)
except CommandExecutionError as msg:
log.error(msg)
return ''
if not runas and _launch_agent(name):
runas = __utils__['mac_utils.console_user'](username=True)
output = list_(runas=runas)
# Used a string here instead of a list because that's what the linux version
# of this module does
pids = ''
for line in output.splitlines():
if 'PID' in line:
continue
if re.search(name, line.split()[-1]):
if line.split()[0].isdigit():
if pids:
pids += '\n'
pids += line.split()[0]
# mac services are a little different than other platforms as they may be
# set to run on intervals and may not always active with a PID. This will
# return a string 'loaded' if it shouldn't always be running and is enabled.
if not _always_running_service(name) and enabled(name) and not pids:
return 'loaded'
return pids
|
Return the status for a service.
:param str name: Used to find the service from launchctl. Can be any part
of the service name or a regex expression.
:param str sig: Find the service with status.pid instead. Note that
``name`` must still be provided.
:param str runas: User to run launchctl commands
:return: The PID for the service if it is running, or 'loaded' if the
service should not always have a PID, or otherwise an empty string
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' service.status cups
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_service.py#L471-L527
|
[
"def enabled(name, runas=None):\n '''\n Check if the specified service is enabled\n\n :param str name: The name of the service to look up\n\n :param str runas: User to run launchctl commands\n\n :return: True if the specified service enabled, otherwise False\n :rtype: bool\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.enabled org.cups.cupsd\n '''\n # Try to list the service. If it can't be listed, it's not enabled\n try:\n list_(name=name, runas=runas)\n return True\n except CommandExecutionError:\n return False\n",
"def list_(name=None, runas=None):\n '''\n Run launchctl list and return the output\n\n :param str name: The name of the service to list\n\n :param str runas: User to run launchctl commands\n\n :return: If a name is passed returns information about the named service,\n otherwise returns a list of all services and pids\n :rtype: str\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' service.list\n salt '*' service.list org.cups.cupsd\n '''\n if name:\n # Get service information and label\n service = _get_service(name)\n label = service['plist']['Label']\n\n # we can assume if we are trying to list a LaunchAgent we need\n # to run as a user, if not provided, we'll use the console user.\n if not runas and _launch_agent(name):\n runas = __utils__['mac_utils.console_user'](username=True)\n\n # Collect information on service: will raise an error if it fails\n return launchctl('list',\n label,\n return_stdout=True,\n runas=runas)\n\n # Collect information on all services: will raise an error if it fails\n return launchctl('list',\n return_stdout=True,\n runas=runas)\n",
"def _get_service(name):\n '''\n Get information about a service. If the service is not found, raise an\n error\n\n :param str name: Service label, file name, or full path\n\n :return: The service information for the service, otherwise an Error\n :rtype: dict\n '''\n services = __utils__['mac_utils.available_services']()\n name = name.lower()\n\n service = _name_in_services(name, services)\n\n # if we would the service we can return it\n if service:\n return service\n\n # if we got here our service is not available, now we can check to see if\n # we received a cached batch of services, if not we did a fresh check\n # so we need to raise that the service could not be found.\n try:\n if not __context__['using_cached_services']:\n raise CommandExecutionError('Service not found: {0}'.format(name))\n except KeyError:\n pass\n\n # we used a cached version to check, a service could have been made\n # between now and then, we should refresh our available services.\n services = __utils__['mac_utils.available_services'](refresh=True)\n\n # check to see if we found the service we are looking for.\n service = _name_in_services(name, services)\n\n if not service:\n # Could not find the service after refresh raise.\n raise CommandExecutionError('Service not found: {0}'.format(name))\n\n # found it :)\n return service\n",
"def _always_running_service(name):\n '''\n Check if the service should always be running based on the KeepAlive Key\n in the service plist.\n\n :param str name: Service label, file name, or full path\n\n :return: True if the KeepAlive key is set to True, False if set to False or\n not set in the plist at all.\n\n :rtype: bool\n\n .. versionadded:: 2019.2.0\n '''\n\n # get all the info from the launchctl service\n service_info = show(name)\n\n # get the value for the KeepAlive key in service plist\n try:\n keep_alive = service_info['plist']['KeepAlive']\n except KeyError:\n return False\n\n # check if KeepAlive is True and not just set.\n\n if isinstance(keep_alive, dict):\n # check for pathstate\n for _file, value in six.iteritems(keep_alive.get('PathState', {})):\n if value is True and os.path.exists(_file):\n return True\n elif value is False and not os.path.exists(_file):\n return True\n\n if keep_alive is True:\n return True\n\n return False\n",
"def _launch_agent(name):\n '''\n Checks to see if the provided service is a LaunchAgent\n\n :param str name: Service label, file name, or full path\n\n :return: True if a LaunchAgent, False if not.\n\n :rtype: bool\n\n .. versionadded:: 2019.2.0\n '''\n\n # Get the path to the service.\n path = _get_service(name)['file_path']\n\n if 'LaunchAgents' not in path:\n return False\n return True\n"
] |
# -*- coding: utf-8 -*-
'''
The service module for macOS
.. versionadded:: 2016.3.0
This module has support for services in the following locations.
.. code-block:: bash
/System/Library/LaunchDaemons/
/System/Library/LaunchAgents/
/Library/LaunchDaemons/
/Library/LaunchAgents/
# As of version "2019.2.0" support for user-specific services were added.
/Users/foo/Library/LaunchAgents/
.. note::
As of the 2019.2.0 release, if a service is located in a ``LaunchAgent``
path and a ``runas`` user is NOT specified, the current console user will
be used to properly interact with the service.
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import os
import re
# Import salt libs
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandExecutionError
from salt.utils.versions import LooseVersion as _LooseVersion
# Import 3rd party libs
from salt.ext import six
# Define the module's virtual name
__virtualname__ = 'service'
__func_alias__ = {
'list_': 'list',
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load on macOS hosts that ship both ``launchctl`` and ``plutil``
    and run macOS 10.11 or newer.
    '''
    if not salt.utils.platform.is_darwin():
        return (False, 'Failed to load the mac_service module:\n'
                       'Only available on macOS systems.')
    if not salt.utils.path.which('launchctl'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "launchctl"')
    if not salt.utils.path.which('plutil'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Required binary not found: "plutil"')
    # Require macOS 10.11 or newer (compare release versions numerically).
    if _LooseVersion(__grains__['osrelease']) < _LooseVersion('10.11'):
        return (False, 'Failed to load the mac_service module:\n'
                       'Requires macOS 10.11 or newer')
    return __virtualname__
def _name_in_services(name, services):
    '''
    Look up ``name`` in the ``services`` mapping.

    The name may be a service label (a dictionary key), the full path to the
    service plist, or the plist file name without its extension.

    :param str name: Service label, file name, or full path (expected to be
        lower-cased by the caller, see ``_get_service``).
    :param dict services: The currently available services.
    :return: The service information for the service, otherwise
        an empty dictionary
    :rtype: dict
    '''
    if name in services:
        # Match on label
        return services[name]
    for service in six.itervalues(services):
        if service['file_path'].lower() == name:
            # Match on full path
            return service
        basename, ext = os.path.splitext(service['file_name'])
        if basename.lower() == name:
            # Match on basename (file name without the .plist extension)
            return service
    # No match at all
    return dict()
def _get_service(name):
    '''
    Get information about a service. If the service is not found, raise an
    error

    :param str name: Service label, file name, or full path
    :return: The service information for the service, otherwise an Error
    :rtype: dict
    :raises CommandExecutionError: if the service cannot be found even after
        refreshing the cached service list.
    '''
    services = __utils__['mac_utils.available_services']()
    # Lookups below are case-insensitive; _name_in_services expects lowercase.
    name = name.lower()
    service = _name_in_services(name, services)
    # if we found the service we can return it
    if service:
        return service
    # if we got here our service is not available, now we can check to see if
    # we received a cached batch of services, if not we did a fresh check
    # so we need to raise that the service could not be found.
    try:
        if not __context__['using_cached_services']:
            raise CommandExecutionError('Service not found: {0}'.format(name))
    except KeyError:
        # No cache flag recorded; fall through and retry with a refresh.
        pass
    # we used a cached version to check, a service could have been made
    # between now and then, we should refresh our available services.
    services = __utils__['mac_utils.available_services'](refresh=True)
    # check to see if we found the service we are looking for.
    service = _name_in_services(name, services)
    if not service:
        # Could not find the service after refresh raise.
        raise CommandExecutionError('Service not found: {0}'.format(name))
    # found it :)
    return service
def _always_running_service(name):
    '''
    Check if the service should always be running based on the KeepAlive Key
    in the service plist.

    :param str name: Service label, file name, or full path
    :return: True if the KeepAlive key is set to True, False if set to False or
        not set in the plist at all.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # get all the info from the launchctl service
    service_info = show(name)
    # get the value for the KeepAlive key in service plist
    try:
        keep_alive = service_info['plist']['KeepAlive']
    except KeyError:
        # No KeepAlive key at all -> not an always-running service.
        return False
    # check if KeepAlive is True and not just set.
    if isinstance(keep_alive, dict):
        # KeepAlive may be a dict of conditions; honor PathState entries:
        # keep running while the path exists (True) or is absent (False).
        for _file, value in six.iteritems(keep_alive.get('PathState', {})):
            if value is True and os.path.exists(_file):
                return True
            elif value is False and not os.path.exists(_file):
                return True
    if keep_alive is True:
        return True
    return False
def _get_domain_target(name, service_target=False):
    '''
    Returns the domain/service target and path for a service. This is used to
    determine whether or not a service should be loaded in a user space or
    system space.

    :param str name: Service label, file name, or full path
    :param bool service_target: Whether to return a full
        service target. This is needed for the enable and disable
        subcommands of /bin/launchctl. Defaults to False
    :return: Tuple of the domain/service target and the path to the service.
    :rtype: tuple

    .. versionadded:: 2019.2.0
    '''
    # Get service information
    service = _get_service(name)
    # get the path to the service
    path = service['file_path']
    # most of the time we'll be at the system level.
    domain_target = 'system'
    # check if a LaunchAgent as we should treat these differently.
    if 'LaunchAgents' in path:
        # Get the console user so we can service in the correct session
        uid = __utils__['mac_utils.console_user']()
        domain_target = 'gui/{}'.format(uid)
    # check to see if we need to make it a full service target
    # ("<domain>/<label>"), as required by ``launchctl enable/disable``.
    if service_target is True:
        domain_target = '{}/{}'.format(domain_target, service['plist']['Label'])
    return (domain_target, path)
def _launch_agent(name):
    '''
    Determine whether the given service is a LaunchAgent.

    :param str name: Service label, file name, or full path
    :return: True if a LaunchAgent, False if not.
    :rtype: bool

    .. versionadded:: 2019.2.0
    '''
    # LaunchAgents are identified purely by the location of their plist file.
    return 'LaunchAgents' in _get_service(name)['file_path']
def show(name):
    '''
    Show properties of a launchctl service

    :param str name: Service label, file name, or full path
    :return: The service information if the service is found
    :rtype: dict
    :raises CommandExecutionError: if the service cannot be found.

    CLI Example:

    .. code-block:: bash

        salt '*' service.show org.cups.cupsd  # service label
        salt '*' service.show org.cups.cupsd.plist  # file name
        salt '*' service.show /System/Library/LaunchDaemons/org.cups.cupsd.plist  # full path
    '''
    # Thin public wrapper around the private lookup helper.
    return _get_service(name)
def launchctl(sub_cmd, *args, **kwargs):
    '''
    Run a launchctl command and raise an error if it fails

    :param str sub_cmd: Sub command supplied to launchctl
    :param tuple args: Tuple containing additional arguments to pass to
        launchctl
    :param dict kwargs: Dictionary containing arguments to pass to
        ``cmd.run_all``
    :param bool return_stdout: A keyword argument. If true return the stdout
        of the launchctl command
    :return: ``True`` if successful, raise ``CommandExecutionError`` if not, or
        the stdout of the launchctl command if requested
    :rtype: bool, str

    CLI Example:

    .. code-block:: bash

        salt '*' service.launchctl debug org.cups.cupsd
    '''
    # All the heavy lifting (spawning the binary, error handling) lives in
    # the shared mac_utils helper so other modules can reuse it.
    return __utils__['mac_utils.launchctl'](sub_cmd, *args, **kwargs)
def list_(name=None, runas=None):
    '''
    Run launchctl list and return the output

    :param str name: The name of the service to list

    :param str runas: User to run launchctl commands

    :return: If a name is passed returns information about the named service,
        otherwise returns a list of all services and pids
    :rtype: str

    CLI Example:

    .. code-block:: bash

        salt '*' service.list
        salt '*' service.list org.cups.cupsd
    '''
    if not name:
        # No service named: list every service and pid.
        # Raises CommandExecutionError on failure.
        return launchctl('list',
                         return_stdout=True,
                         runas=runas)
    # Resolve the service so we can list it by its launchd label.
    label = _get_service(name)['plist']['Label']
    # A LaunchAgent must be queried in a user session; fall back to the
    # console user when no runas was supplied.
    if not runas and _launch_agent(name):
        runas = __utils__['mac_utils.console_user'](username=True)
    # Collect information on the single service: raises on failure.
    return launchctl('list',
                     label,
                     return_stdout=True,
                     runas=runas)
def enable(name, runas=None):
    '''
    Enable a launchd service. Raises an error if the service fails to be enabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already enabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enable org.cups.cupsd
    '''
    # 'enable' needs a full <service-target> (domain-target/label); the
    # plist path returned alongside it is not used here.
    service_target, _ = _get_domain_target(name, service_target=True)
    # Enable the service: raises CommandExecutionError on failure.
    return launchctl('enable', service_target, runas=runas)
def disable(name, runas=None):
    '''
    Disable a launchd service. Raises an error if the service fails to be
    disabled

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already disabled
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable org.cups.cupsd
    '''
    # 'disable' needs a full <service-target> (domain-target/label); the
    # plist path returned alongside it is not used here.
    service_target, _ = _get_domain_target(name, service_target=True)
    # Disable the service: raises CommandExecutionError on failure.
    return launchctl('disable', service_target, runas=runas)
def start(name, runas=None):
    '''
    Start a launchd service. Raises an error if the service fails to start

    .. note::
        To start a service in macOS the service must be enabled first. Use
        ``service.enable`` to enable the service.

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already running
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.start org.cups.cupsd
    '''
    # Resolve the launchd domain target and the plist path for the service.
    domain, plist_path = _get_domain_target(name)
    # 'bootstrap' loads the service into the domain; raises on failure.
    return launchctl('bootstrap', domain, plist_path, runas=runas)
def stop(name, runas=None):
    '''
    Stop a launchd service. Raises an error if the service fails to stop

    .. note::
        Though ``service.stop`` will unload a service in macOS, the service
        will start on next boot unless it is disabled. Use ``service.disable``
        to disable the service

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful or if the service is already stopped
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.stop org.cups.cupsd
    '''
    # Resolve the launchd domain target and the plist path for the service.
    domain, plist_path = _get_domain_target(name)
    # 'bootout' unloads the service from the domain; raises on failure.
    return launchctl('bootout', domain, plist_path, runas=runas)
def restart(name, runas=None):
    '''
    Unloads and reloads a launchd service. Raises an error if the service
    fails to reload

    :param str name: Service label, file name, or full path

    :param str runas: User to run launchctl commands

    :return: ``True`` if successful
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.restart org.cups.cupsd
    '''
    # Pass runas through to the enabled check as well: a LaunchAgent must be
    # queried in the correct user session, otherwise the check can wrongly
    # report the service as not loaded and skip the stop.
    if enabled(name, runas=runas):
        stop(name, runas=runas)
    start(name, runas=runas)
    return True
def available(name):
    '''
    Check that the given service is available.

    :param str name: The name of the service

    :return: True if the service is available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.available com.openssh.sshd
    '''
    # _get_service raises CommandExecutionError for unknown services; map
    # that into a boolean answer.
    try:
        _get_service(name)
    except CommandExecutionError:
        return False
    return True
def missing(name):
    '''
    The inverse of service.available
    Check that the given service is not available.

    :param str name: The name of the service

    :return: True if the service is not available, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.missing com.openssh.sshd
    '''
    # Simply negate the availability check.
    if available(name):
        return False
    return True
def enabled(name, runas=None):
    '''
    Check if the specified service is enabled

    :param str name: The name of the service to look up

    :param str runas: User to run launchctl commands

    :return: True if the specified service enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.enabled org.cups.cupsd
    '''
    # A service that cannot be listed by launchctl is not enabled.
    try:
        list_(name=name, runas=runas)
    except CommandExecutionError:
        return False
    else:
        return True
def disabled(name, runas=None, domain='system'):
    '''
    Check if the specified service is not enabled. This is the opposite of
    ``service.enabled``

    :param str name: The name to look up

    :param str runas: User to run launchctl commands

    :param str domain: domain to check for disabled services. Default is system.

    :return: True if the specified service is NOT enabled, otherwise False
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' service.disabled org.cups.cupsd
    '''
    disabled = launchctl('print-disabled',
                         domain,
                         return_stdout=True,
                         runas=runas)
    # Expected output lines look like: "com.example.label" => true
    for service in disabled.split("\n"):
        if name not in service or "=>" not in service:
            continue
        srv_part, _, status = service.partition("=>")
        quoted = srv_part.split("\"")
        # Guard against lines that mention the name but do not carry the
        # quoted-label shape (previously raised IndexError on such lines).
        if len(quoted) < 2 or quoted[1] != name:
            continue
        return 'true' in status.lower()
    return False
def get_all(runas=None):
    '''
    Return a list of services that are enabled or available. Can be used to
    find the name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services available or enabled
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_all
    '''
    # Start with the enabled services, then merge in every service plist
    # discoverable on disk, de-duplicating via a set.
    services = set(get_enabled(runas=runas))
    services.update(__utils__['mac_utils.available_services']().keys())
    return sorted(services)
def get_enabled(runas=None):
    '''
    Return a list of all services that are enabled. Can be used to find the
    name of a service.

    :param str runas: User to run launchctl commands

    :return: A list of all the services enabled on the system
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_enabled
    '''
    # ``launchctl list`` output is a header line followed by
    # PID<TAB>Status<TAB>Label rows.
    stdout = list_(runas=runas)

    enabled = set()
    # Iterate splitlines() directly; the previous identity list
    # comprehension added nothing.
    for line in stdout.splitlines():
        # Skip the header row and any blank lines.
        if not line or line.startswith('PID'):
            continue
        _pid, _status, label = line.split('\t')
        enabled.add(label)

    return sorted(enabled)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.