repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/redismod.py
ping
python
def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False
Ping the server, returns False on connection errors CLI Example: .. code-block:: bash salt '*' redis.ping
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L557-L571
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. 
code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. 
code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
save
python
def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save()
Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L574-L585
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. 
code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
set_key
python
def set_key(key, value, host=None, port=None, db=None, password=None):
    '''
    Set redis key value

    CLI Example:

    .. code-block:: bash

        salt '*' redis.set_key foo bar
    '''
    # Connect and issue SET in one expression; returns the client's result
    return _connect(host, port, db, password).set(key, value)
Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L588-L599
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. 
code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
shutdown
python
def shutdown(host=None, port=None, db=None, password=None):
    '''
    Synchronously save the dataset to disk and then shut down the server

    CLI Example:

    .. code-block:: bash

        salt '*' redis.shutdown
    '''
    server = _connect(host, port, db, password)

    def _reachable(srv):
        # True when the server answers a PING, False on connection failure
        try:
            srv.ping()
            return True
        except redis.ConnectionError:
            return False

    if not _reachable(server):
        # Server was never reachable, so there is nothing to shut down
        return False
    server.shutdown()
    # Shutdown succeeded exactly when the server stops answering PINGs
    return not _reachable(server)
Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L602-L625
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. 
code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
slaveof
python
def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port)
Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L628-L646
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. 
code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
smembers
python
def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key))
Get members in a Redis set CLI Example: .. code-block:: bash salt '*' redis.smembers foo_set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L649-L660
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. 
code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
time
python
def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0]
Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L663-L674
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. 
code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
zcard
python
def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key)
Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L677-L688
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. 
code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
zrange
python
def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop)
Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L691-L702
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. 
code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret))) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
sentinel_get_master_ip
python
def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret)))
Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L705-L719
[ "def _sconnect(host=None, port=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if host is None:\n host = __salt__['config.option']('redis_sentinel.host', 'localhost')\n if port is None:\n port = __salt__['config.option']('redis_sentinel.port', 26379)\n if password is None:\n password = __salt__['config.option']('redis_sentinel.password')\n\n return redis.StrictRedis(host, port, password=password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. 
code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/redismod.py
get_master_ip
python
def get_master_ip(host=None, port=None, password=None): ''' Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip ''' server = _connect(host, port, password) srv_info = server.info() ret = (srv_info.get('master_host', ''), srv_info.get('master_port', '')) return dict(list(zip(('master_host', 'master_port'), ret)))
Get host information about slave .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.get_master_ip
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/redismod.py#L722-L737
[ "def _connect(host=None, port=None, db=None, password=None):\n '''\n Returns an instance of the redis client\n '''\n if not host:\n host = __salt__['config.option']('redis.host')\n if not port:\n port = __salt__['config.option']('redis.port')\n if not db:\n db = __salt__['config.option']('redis.db')\n if not password:\n password = __salt__['config.option']('redis.password')\n\n return redis.StrictRedis(host, port, db, password, decode_responses=True)\n" ]
# -*- coding: utf-8 -*- ''' Module to provide redis functionality to Salt .. versionadded:: 2014.7.0 :configuration: This module requires the redis python module and uses the following defaults which may be overridden in the minion configuration: .. code-block:: yaml redis.host: 'salt' redis.port: 6379 redis.db: 0 redis.password: None ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function from salt.ext.six.moves import zip from salt.ext import six from datetime import datetime import salt.utils.args # Import third party libs try: import redis HAS_REDIS = True except ImportError: HAS_REDIS = False __virtualname__ = 'redis' def __virtual__(): ''' Only load this module if redis python module is installed ''' if HAS_REDIS: return __virtualname__ else: return (False, 'The redis execution module failed to load: the redis python library is not available.') def _connect(host=None, port=None, db=None, password=None): ''' Returns an instance of the redis client ''' if not host: host = __salt__['config.option']('redis.host') if not port: port = __salt__['config.option']('redis.port') if not db: db = __salt__['config.option']('redis.db') if not password: password = __salt__['config.option']('redis.password') return redis.StrictRedis(host, port, db, password, decode_responses=True) def _sconnect(host=None, port=None, password=None): ''' Returns an instance of the redis client ''' if host is None: host = __salt__['config.option']('redis_sentinel.host', 'localhost') if port is None: port = __salt__['config.option']('redis_sentinel.port', 26379) if password is None: password = __salt__['config.option']('redis_sentinel.password') return redis.StrictRedis(host, port, password=password, decode_responses=True) def bgrewriteaof(host=None, port=None, db=None, password=None): ''' Asynchronously rewrite the append-only file CLI Example: .. 
code-block:: bash salt '*' redis.bgrewriteaof ''' server = _connect(host, port, db, password) return server.bgrewriteaof() def bgsave(host=None, port=None, db=None, password=None): ''' Asynchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.bgsave ''' server = _connect(host, port, db, password) return server.bgsave() def config_get(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_get salt '*' redis.config_get port ''' server = _connect(host, port, db, password) return server.config_get(pattern) def config_set(name, value, host=None, port=None, db=None, password=None): ''' Set redis server configuration values CLI Example: .. code-block:: bash salt '*' redis.config_set masterauth luv_kittens ''' server = _connect(host, port, db, password) return server.config_set(name, value) def dbsize(host=None, port=None, db=None, password=None): ''' Return the number of keys in the selected database CLI Example: .. code-block:: bash salt '*' redis.dbsize ''' server = _connect(host, port, db, password) return server.dbsize() def delete(*keys, **connection_args): ''' Deletes the keys from redis, returns number of keys deleted CLI Example: .. code-block:: bash salt '*' redis.delete foo ''' # Get connection args from keywords if set conn_args = {} for arg in ['host', 'port', 'db', 'password']: if arg in connection_args: conn_args[arg] = connection_args[arg] server = _connect(**conn_args) return server.delete(*keys) def exists(key, host=None, port=None, db=None, password=None): ''' Return true if the key exists in redis CLI Example: .. code-block:: bash salt '*' redis.exists foo ''' server = _connect(host, port, db, password) return server.exists(key) def expire(key, seconds, host=None, port=None, db=None, password=None): ''' Set a keys time to live in seconds CLI Example: .. 
code-block:: bash salt '*' redis.expire foo 300 ''' server = _connect(host, port, db, password) return server.expire(key, seconds) def expireat(key, timestamp, host=None, port=None, db=None, password=None): ''' Set a keys expire at given UNIX time CLI Example: .. code-block:: bash salt '*' redis.expireat foo 1400000000 ''' server = _connect(host, port, db, password) return server.expireat(key, timestamp) def flushall(host=None, port=None, db=None, password=None): ''' Remove all keys from all databases CLI Example: .. code-block:: bash salt '*' redis.flushall ''' server = _connect(host, port, db, password) return server.flushall() def flushdb(host=None, port=None, db=None, password=None): ''' Remove all keys from the selected database CLI Example: .. code-block:: bash salt '*' redis.flushdb ''' server = _connect(host, port, db, password) return server.flushdb() def get_key(key, host=None, port=None, db=None, password=None): ''' Get redis key value CLI Example: .. code-block:: bash salt '*' redis.get_key foo ''' server = _connect(host, port, db, password) return server.get(key) def hdel(key, *fields, **options): ''' Delete one of more hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hdel foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hdel(key, *fields) def hexists(key, field, host=None, port=None, db=None, password=None): ''' Determine if a hash fields exists. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hexists foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hexists(key, field) def hget(key, field, host=None, port=None, db=None, password=None): ''' Get specific field value from a redis hash, returns dict CLI Example: .. 
code-block:: bash salt '*' redis.hget foo_hash bar_field ''' server = _connect(host, port, db, password) return server.hget(key, field) def hgetall(key, host=None, port=None, db=None, password=None): ''' Get all fields and values from a redis hash, returns dict CLI Example: .. code-block:: bash salt '*' redis.hgetall foo_hash ''' server = _connect(host, port, db, password) return server.hgetall(key) def hincrby(key, field, increment=1, host=None, port=None, db=None, password=None): ''' Increment the integer value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrby foo_hash bar_field 5 ''' server = _connect(host, port, db, password) return server.hincrby(key, field, amount=increment) def hincrbyfloat(key, field, increment=1.0, host=None, port=None, db=None, password=None): ''' Increment the float value of a hash field by the given number. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hincrbyfloat foo_hash bar_field 5.17 ''' server = _connect(host, port, db, password) return server.hincrbyfloat(key, field, amount=increment) def hlen(key, host=None, port=None, db=None, password=None): ''' Returns number of fields of a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hlen foo_hash ''' server = _connect(host, port, db, password) return server.hlen(key) def hmget(key, *fields, **options): ''' Returns the values of all the given hash fields. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hmget foo_hash bar_field1 bar_field2 ''' host = options.get('host', None) port = options.get('port', None) database = options.get('db', None) password = options.get('password', None) server = _connect(host, port, database, password) return server.hmget(key, *fields) def hmset(key, **fieldsvals): ''' Sets multiple hash fields to multiple values. .. versionadded:: 2017.7.0 CLI Example: .. 
code-block:: bash salt '*' redis.hmset foo_hash bar_field1=bar_value1 bar_field2=bar_value2 ''' host = fieldsvals.pop('host', None) port = fieldsvals.pop('port', None) database = fieldsvals.pop('db', None) password = fieldsvals.pop('password', None) server = _connect(host, port, database, password) return server.hmset(key, salt.utils.args.clean_kwargs(**fieldsvals)) def hset(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hset foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hset(key, field, value) def hsetnx(key, field, value, host=None, port=None, db=None, password=None): ''' Set the value of a hash field only if the field does not exist. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hsetnx foo_hash bar_field bar_value ''' server = _connect(host, port, db, password) return server.hsetnx(key, field, value) def hvals(key, host=None, port=None, db=None, password=None): ''' Return all the values in a hash. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hvals foo_hash bar_field1 bar_value1 ''' server = _connect(host, port, db, password) return server.hvals(key) def hscan(key, cursor=0, match=None, count=None, host=None, port=None, db=None, password=None): ''' Incrementally iterate hash fields and associated values. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' redis.hscan foo_hash match='field_prefix_*' count=1 ''' server = _connect(host, port, db, password) return server.hscan(key, cursor=cursor, match=match, count=count) def info(host=None, port=None, db=None, password=None): ''' Get information and statistics about the server CLI Example: .. 
code-block:: bash salt '*' redis.info ''' server = _connect(host, port, db, password) return server.info() def keys(pattern='*', host=None, port=None, db=None, password=None): ''' Get redis keys, supports glob style patterns CLI Example: .. code-block:: bash salt '*' redis.keys salt '*' redis.keys test* ''' server = _connect(host, port, db, password) return server.keys(pattern) def key_type(key, host=None, port=None, db=None, password=None): ''' Get redis key type CLI Example: .. code-block:: bash salt '*' redis.type foo ''' server = _connect(host, port, db, password) return server.type(key) def lastsave(host=None, port=None, db=None, password=None): ''' Get the UNIX time in seconds of the last successful save to disk CLI Example: .. code-block:: bash salt '*' redis.lastsave ''' # Use of %s to get the timestamp is not supported by Python. The reason it # works is because it's passed to the system strftime which may not support # it. See: https://stackoverflow.com/a/11743262 server = _connect(host, port, db, password) if six.PY2: return int((server.lastsave() - datetime(1970, 1, 1)).total_seconds()) else: return int(server.lastsave().timestamp()) def llen(key, host=None, port=None, db=None, password=None): ''' Get the length of a list in Redis CLI Example: .. code-block:: bash salt '*' redis.llen foo_list ''' server = _connect(host, port, db, password) return server.llen(key) def lrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a list in Redis CLI Example: .. code-block:: bash salt '*' redis.lrange foo_list 0 10 ''' server = _connect(host, port, db, password) return server.lrange(key, start, stop) def ping(host=None, port=None, db=None, password=None): ''' Ping the server, returns False on connection errors CLI Example: .. 
code-block:: bash salt '*' redis.ping ''' server = _connect(host, port, db, password) try: return server.ping() except redis.ConnectionError: return False def save(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk CLI Example: .. code-block:: bash salt '*' redis.save ''' server = _connect(host, port, db, password) return server.save() def set_key(key, value, host=None, port=None, db=None, password=None): ''' Set redis key value CLI Example: .. code-block:: bash salt '*' redis.set_key foo bar ''' server = _connect(host, port, db, password) return server.set(key, value) def shutdown(host=None, port=None, db=None, password=None): ''' Synchronously save the dataset to disk and then shut down the server CLI Example: .. code-block:: bash salt '*' redis.shutdown ''' server = _connect(host, port, db, password) try: # Return false if unable to ping server server.ping() except redis.ConnectionError: return False server.shutdown() try: # This should fail now if the server is shutdown, which we want server.ping() except redis.ConnectionError: return True return False def slaveof(master_host=None, master_port=None, host=None, port=None, db=None, password=None): ''' Make the server a slave of another instance, or promote it as master CLI Example: .. code-block:: bash # Become slave of redis-n01.example.com:6379 salt '*' redis.slaveof redis-n01.example.com 6379 salt '*' redis.slaveof redis-n01.example.com # Become master salt '*' redis.slaveof ''' if master_host and not master_port: master_port = 6379 server = _connect(host, port, db, password) return server.slaveof(master_host, master_port) def smembers(key, host=None, port=None, db=None, password=None): ''' Get members in a Redis set CLI Example: .. 
code-block:: bash salt '*' redis.smembers foo_set ''' server = _connect(host, port, db, password) return list(server.smembers(key)) def time(host=None, port=None, db=None, password=None): ''' Return the current server UNIX time in seconds CLI Example: .. code-block:: bash salt '*' redis.time ''' server = _connect(host, port, db, password) return server.time()[0] def zcard(key, host=None, port=None, db=None, password=None): ''' Get the length of a sorted set in Redis CLI Example: .. code-block:: bash salt '*' redis.zcard foo_sorted ''' server = _connect(host, port, db, password) return server.zcard(key) def zrange(key, start, stop, host=None, port=None, db=None, password=None): ''' Get a range of values from a sorted set in Redis by index CLI Example: .. code-block:: bash salt '*' redis.zrange foo_sorted 0 10 ''' server = _connect(host, port, db, password) return server.zrange(key, start, stop) def sentinel_get_master_ip(master, host=None, port=None, password=None): ''' Get ip for sentinel master .. versionadded: 2016.3.0 CLI Example: .. code-block:: bash salt '*' redis.sentinel_get_master_ip 'mymaster' ''' server = _sconnect(host, port, password) ret = server.sentinel_get_master_addr_by_name(master) return dict(list(zip(('master_host', 'master_port'), ret)))
saltstack/salt
salt/modules/boto_efs.py
_get_conn
python
def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client
Create a boto3 client connection to EFS
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L82-L121
null
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. 
ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. 
Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
create_file_system
python
def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response
Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L124-L172
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. 
returns (dict) - A dict of the response data CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
create_mount_target
python
def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups)
Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L175-L240
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
create_tags
python
def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L243-L275
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. 
The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
delete_file_system
python
def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid)
Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L278-L303
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. 
Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
delete_mount_target
python
def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid)
Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L306-L337
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
delete_tags
python
def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags)
Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L340-L365
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
get_file_systems
python
def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result
Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L368-L421
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
get_mount_targets
python
def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result
Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L424-L468
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
get_tags
python
def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result
Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L471-L501
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
saltstack/salt
salt/modules/boto_efs.py
set_security_groups
python
def set_security_groups(mounttargetid, securitygroup, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.modify_mount_target_security_groups(MountTargetId=mounttargetid, SecurityGroups=securitygroup)
Modifies the set of security groups in effect for a mount target mounttargetid (string) - ID of the mount target whose security groups will be modified securitygroups (list[string]) - list of no more than 5 VPC security group IDs. CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.set_security_groups my-mount-target-id my-sec-group
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_efs.py#L504-L529
[ "def _get_conn(key=None,\n keyid=None,\n profile=None,\n region=None,\n **kwargs):\n '''\n Create a boto3 client connection to EFS\n '''\n client = None\n if profile:\n if isinstance(profile, six.string_types):\n if profile in __pillar__:\n profile = __pillar__[profile]\n elif profile in __opts__:\n profile = __opts__[profile]\n elif key or keyid or region:\n profile = {}\n if key:\n profile['key'] = key\n if keyid:\n profile['keyid'] = keyid\n if region:\n profile['region'] = region\n\n if isinstance(profile, dict):\n if 'region' in profile:\n profile['region_name'] = profile['region']\n profile.pop('region', None)\n if 'key' in profile:\n profile['aws_secret_access_key'] = profile['key']\n profile.pop('key', None)\n if 'keyid' in profile:\n profile['aws_access_key_id'] = profile['keyid']\n profile.pop('keyid', None)\n\n client = boto3.client('efs', **profile)\n else:\n client = boto3.client('efs')\n\n return client\n" ]
# -*- coding: utf-8 -*- ''' Connection module for Amazon EFS .. versionadded:: 2017.7.0 :configuration: This module accepts explicit EFS credentials but can also utilize IAM roles assigned to the instance through Instance Profiles or it can read them from the ~/.aws/credentials file or from these environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available at: .. code-block:: text http://docs.aws.amazon.com/efs/latest/ug/ access-control-managing-permissions.html http://boto3.readthedocs.io/en/latest/guide/ configuration.html#guide-configuration If IAM roles are not used you need to specify them either in a pillar or in the minion's config file .. code-block:: yaml efs.keyid: GKTADJGHEIQSXMKKRBJ08H efs.key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs A region may also be specified in the configuration .. code-block:: yaml efs.region: us-east-1 If a region is not specified, the default is us-east-1. It's also possible to speficy key, keyid, and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askd+ghsdfjkghWupU/asdflkdfklgjsdfjajkghs region: us-east-1 :depends: boto3 ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import 3rd-party libs from salt.ext import six try: import boto3 HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False # Import salt libs import salt.utils.versions log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto3 libraries exist and if boto3 libraries are greater than a given version. 
''' return salt.utils.versions.check_boto_reqs( boto3_ver='1.0.0', check_boto=False ) def _get_conn(key=None, keyid=None, profile=None, region=None, **kwargs): ''' Create a boto3 client connection to EFS ''' client = None if profile: if isinstance(profile, six.string_types): if profile in __pillar__: profile = __pillar__[profile] elif profile in __opts__: profile = __opts__[profile] elif key or keyid or region: profile = {} if key: profile['key'] = key if keyid: profile['keyid'] = keyid if region: profile['region'] = region if isinstance(profile, dict): if 'region' in profile: profile['region_name'] = profile['region'] profile.pop('region', None) if 'key' in profile: profile['aws_secret_access_key'] = profile['key'] profile.pop('key', None) if 'keyid' in profile: profile['aws_access_key_id'] = profile['keyid'] profile.pop('keyid', None) client = boto3.client('efs', **profile) else: client = boto3.client('efs') return client def create_file_system(name, performance_mode='generalPurpose', keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Creates a new, empty file system. name (string) - The name for the new file system performance_mode (string) - The PerformanceMode of the file system. Can be either generalPurpose or maxIO creation_token (string) - A unique name to be used as reference when creating an EFS. This will ensure idempotency. Set to name if not specified otherwise returns (dict) - A dict of the data for the elastic file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_file_system efs-name generalPurpose ''' if creation_token is None: creation_token = name tags = {"Key": "Name", "Value": name} client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.create_file_system(CreationToken=creation_token, PerformanceMode=performance_mode) if 'FileSystemId' in response: client.create_tags(FileSystemId=response['FileSystemId'], Tags=tags) if 'Name' in response: response['Name'] = name return response def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates a mount target for a file system. You can then mount the file system on EC2 instances via the mount target. You can create one mount target in each Availability Zone in your VPC. All EC2 instances in a VPC within a given Availability Zone share a single mount target for a given file system. If you have multiple subnets in an Availability Zone, you create a mount target in one of the subnets. EC2 instances do not need to be in the same subnet as the mount target in order to access their file system. filesystemid (string) - ID of the file system for which to create the mount target. subnetid (string) - ID of the subnet to add the mount target in. ipaddress (string) - Valid IPv4 address within the address range of the specified subnet. securitygroups (list[string]) - Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be for the same VPC as subnet specified. returns (dict) - A dict of the response data CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.create_mount_target filesystemid subnetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if ipaddress is None and securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid) if ipaddress is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, SecurityGroups=securitygroups) if securitygroups is None: return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress) return client.create_mount_target(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups) def create_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Creates or overwrites tags associated with a file system. Each tag is a key-value pair. If a tag key specified in the request already exists on the file system, this operation overwrites its value with the value provided in the request. filesystemid (string) - ID of the file system for whose tags will be modified. tags (dict) - The tags to add to the file system CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.create_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) new_tags = [] for k, v in six.iteritems(tags): new_tags.append({'Key': k, 'Value': v}) client.create_tags(FileSystemId=filesystemid, Tags=new_tags) def delete_file_system(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes a file system, permanently severing access to its contents. Upon return, the file system no longer exists and you can't access any contents of the deleted file system. You can't delete a file system that is in use. That is, if the file system has any mount targets, you must first delete them. filesystemid (string) - ID of the file system to delete. CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_file_system filesystemid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_file_system(FileSystemId=filesystemid) def delete_mount_target(mounttargetid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified mount target. This operation forcibly breaks any mounts of the file system via the mount target that is being deleted, which might disrupt instances or applications using those mounts. To avoid applications getting cut off abruptly, you might consider unmounting any mounts of the mount target, if feasible. The operation also deletes the associated network interface. Uncommitted writes may be lost, but breaking a mount target using this operation does not corrupt the file system itself. The file system you created remains. You can mount an EC2 instance in your VPC via another mount target. mounttargetid (string) - ID of the mount target to delete CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.delete_mount_target mounttargetid ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_mount_target(MountTargetId=mounttargetid) def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Deletes the specified tags from a file system. filesystemid (string) - ID of the file system for whose tags will be removed. tags (list[string]) - The tag keys to delete to the file system CLI Example: .. 
code-block:: bash salt 'my-minion' boto_efs.delete_tags ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) client.delete_tags(FileSystemId=filesystemid, Tags=tags) def get_file_systems(filesystemid=None, keyid=None, key=None, profile=None, region=None, creation_token=None, **kwargs): ''' Get all EFS properties or a specific instance property if filesystemid is specified filesystemid (string) - ID of the file system to retrieve properties creation_token (string) - A unique token that identifies an EFS. If fileysystem created via create_file_system this would either be explictitly passed in or set to name. You can limit your search with this. returns (list[dict]) - list of all elastic file system properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_file_systems efs-id ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid and creation_token: response = client.describe_file_systems(FileSystemId=filesystemid, CreationToken=creation_token) result = response["FileSystems"] elif filesystemid: response = client.describe_file_systems(FileSystemId=filesystemid) result = response["FileSystems"] elif creation_token: response = client.describe_file_systems(CreationToken=creation_token) result = response["FileSystems"] else: response = client.describe_file_systems() result = response["FileSystems"] while "NextMarker" in response: response = client.describe_file_systems( Marker=response["NextMarker"]) result.extend(response["FileSystems"]) return result def get_mount_targets(filesystemid=None, mounttargetid=None, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Get all the EFS mount point properties for a specific filesystemid or the properties for a specific mounttargetid. 
One or the other must be specified filesystemid (string) - ID of the file system whose mount targets to list Must be specified if mounttargetid is not mounttargetid (string) - ID of the mount target to have its properties returned Must be specified if filesystemid is not returns (list[dict]) - list of all mount point properties CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_mount_targets ''' result = None client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) if filesystemid: response = client.describe_mount_targets(FileSystemId=filesystemid) result = response["MountTargets"] while "NextMarker" in response: response = client.describe_mount_targets(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["MountTargets"]) elif mounttargetid: response = client.describe_mount_targets(MountTargetId=mounttargetid) result = response["MountTargets"] return result def get_tags(filesystemid, keyid=None, key=None, profile=None, region=None, **kwargs): ''' Return the tags associated with an EFS instance. filesystemid (string) - ID of the file system whose tags to list returns (list) - list of tags as key/value pairs CLI Example: .. code-block:: bash salt 'my-minion' boto_efs.get_tags efs-id ''' client = _get_conn(key=key, keyid=keyid, profile=profile, region=region) response = client.describe_tags(FileSystemId=filesystemid) result = response["Tags"] while "NextMarker" in response: response = client.describe_tags(FileSystemId=filesystemid, Marker=response["NextMarker"]) result.extend(response["Tags"]) return result
saltstack/salt
salt/states/win_wusa.py
installed
python
def installed(name, source): ''' Ensure an update is installed on the minion Args: name(str): Name of the Windows KB ("KB123456") source (str): Source of .msu file corresponding to the KB Example: .. code-block:: yaml KB123456: wusa.installed: - source: salt://kb123456.msu ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Input validation if not name: raise SaltInvocationError('Must specify a KB "name"') if not source: raise SaltInvocationError('Must specify a "source" file to install') # Is the KB already installed if __salt__['wusa.is_installed'](name): ret['result'] = True ret['comment'] = '{0} already installed'.format(name) return ret # Check for test=True if __opts__['test'] is True: ret['result'] = None ret['comment'] = '{0} would be installed'.format(name) ret['result'] = None return ret # Cache the file cached_source_path = __salt__['cp.cache_file'](path=source, saltenv=__env__) if not cached_source_path: msg = 'Unable to cache {0} from saltenv "{1}"'.format( salt.utils.url.redact_http_basic_auth(source), __env__) ret['comment'] = msg return ret # Install the KB __salt__['wusa.install'](cached_source_path) # Verify successful install if __salt__['wusa.is_installed'](name): ret['comment'] = '{0} was installed'.format(name) ret['changes'] = {'old': False, 'new': True} ret['result'] = True else: ret['comment'] = '{0} failed to install'.format(name) return ret
Ensure an update is installed on the minion Args: name(str): Name of the Windows KB ("KB123456") source (str): Source of .msu file corresponding to the KB Example: .. code-block:: yaml KB123456: wusa.installed: - source: salt://kb123456.msu
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_wusa.py#L37-L100
[ "def redact_http_basic_auth(output):\n '''\n Remove HTTP user and password\n '''\n # We can't use re.compile because re.compile(someregex).sub() doesn't\n # support flags even in Python 2.7.\n url_re = '(https?)://.*@'\n redacted = r'\\1://<redacted>@'\n if sys.version_info >= (2, 7):\n # re.sub() supports flags as of 2.7, use this to do a case-insensitive\n # match.\n return re.sub(url_re, redacted, output, flags=re.IGNORECASE)\n else:\n # We're on python 2.6, test if a lowercased version of the output\n # string matches the regex...\n if re.search(url_re, output.lower()):\n # ... and if it does, perform the regex substitution.\n return re.sub(url_re, redacted, output.lower())\n # No match, just return the original string\n return output\n" ]
# -*- coding: utf-8 -*- ''' Microsoft Updates (KB) Management This module provides the ability to enforce KB installations from files (.msu), without WSUS or Windows Update .. versionadded:: 2018.3.4 ''' # Import python libs from __future__ import absolute_import, unicode_literals import logging # Import salt libs import salt.utils.platform import salt.utils.url from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'wusa' def __virtual__(): ''' Load only on Windows ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows systems' return __virtualname__ def uninstalled(name): ''' Ensure an update is uninstalled from the minion Args: name(str): Name of the Windows KB ("KB123456") Example: .. code-block:: yaml KB123456: wusa.uninstalled ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Is the KB already uninstalled if not __salt__['wusa.is_installed'](name): ret['result'] = True ret['comment'] = '{0} already uninstalled'.format(name) return ret # Check for test=True if __opts__['test'] is True: ret['result'] = None ret['comment'] = '{0} would be uninstalled'.format(name) ret['result'] = None return ret # Uninstall the KB __salt__['wusa.uninstall'](name) # Verify successful uninstall if not __salt__['wusa.is_installed'](name): ret['comment'] = '{0} was uninstalled'.format(name) ret['changes'] = {'old': True, 'new': False} ret['result'] = True else: ret['comment'] = '{0} failed to uninstall'.format(name) return ret
saltstack/salt
salt/states/win_wusa.py
uninstalled
python
def uninstalled(name): ''' Ensure an update is uninstalled from the minion Args: name(str): Name of the Windows KB ("KB123456") Example: .. code-block:: yaml KB123456: wusa.uninstalled ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Is the KB already uninstalled if not __salt__['wusa.is_installed'](name): ret['result'] = True ret['comment'] = '{0} already uninstalled'.format(name) return ret # Check for test=True if __opts__['test'] is True: ret['result'] = None ret['comment'] = '{0} would be uninstalled'.format(name) ret['result'] = None return ret # Uninstall the KB __salt__['wusa.uninstall'](name) # Verify successful uninstall if not __salt__['wusa.is_installed'](name): ret['comment'] = '{0} was uninstalled'.format(name) ret['changes'] = {'old': True, 'new': False} ret['result'] = True else: ret['comment'] = '{0} failed to uninstall'.format(name) return ret
Ensure an update is uninstalled from the minion Args: name(str): Name of the Windows KB ("KB123456") Example: .. code-block:: yaml KB123456: wusa.uninstalled
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_wusa.py#L103-L148
null
# -*- coding: utf-8 -*- ''' Microsoft Updates (KB) Management This module provides the ability to enforce KB installations from files (.msu), without WSUS or Windows Update .. versionadded:: 2018.3.4 ''' # Import python libs from __future__ import absolute_import, unicode_literals import logging # Import salt libs import salt.utils.platform import salt.utils.url from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'wusa' def __virtual__(): ''' Load only on Windows ''' if not salt.utils.platform.is_windows(): return False, 'Only available on Windows systems' return __virtualname__ def installed(name, source): ''' Ensure an update is installed on the minion Args: name(str): Name of the Windows KB ("KB123456") source (str): Source of .msu file corresponding to the KB Example: .. code-block:: yaml KB123456: wusa.installed: - source: salt://kb123456.msu ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} # Input validation if not name: raise SaltInvocationError('Must specify a KB "name"') if not source: raise SaltInvocationError('Must specify a "source" file to install') # Is the KB already installed if __salt__['wusa.is_installed'](name): ret['result'] = True ret['comment'] = '{0} already installed'.format(name) return ret # Check for test=True if __opts__['test'] is True: ret['result'] = None ret['comment'] = '{0} would be installed'.format(name) ret['result'] = None return ret # Cache the file cached_source_path = __salt__['cp.cache_file'](path=source, saltenv=__env__) if not cached_source_path: msg = 'Unable to cache {0} from saltenv "{1}"'.format( salt.utils.url.redact_http_basic_auth(source), __env__) ret['comment'] = msg return ret # Install the KB __salt__['wusa.install'](cached_source_path) # Verify successful install if __salt__['wusa.is_installed'](name): ret['comment'] = '{0} was installed'.format(name) ret['changes'] = {'old': False, 'new': True} 
ret['result'] = True else: ret['comment'] = '{0} failed to install'.format(name) return ret
saltstack/salt
salt/utils/smtp.py
send
python
def send(kwargs, opts): ''' Send an email with the data ''' opt_keys = ( 'smtp.to', 'smtp.from', 'smtp.host', 'smtp.port', 'smtp.tls', 'smtp.username', 'smtp.password', 'smtp.subject', 'smtp.gpgowner', 'smtp.content', ) config = {} for key in opt_keys: config[key] = opts.get(key, '') config.update(kwargs) if not config['smtp.port']: config['smtp.port'] = 25 log.debug('SMTP port has been set to %s', config['smtp.port']) log.debug("smtp_return: Subject is '%s'", config['smtp.subject']) if HAS_GNUPG and config['smtp.gpgowner']: gpg = gnupg.GPG( gnupghome=os.path.expanduser( '~{0}/.gnupg'.format(config['smtp.gpgowner']) ), options=['--trust-model always'] ) encrypted_data = gpg.encrypt(config['smtp.content'], config['smtp.to']) if encrypted_data.ok: log.debug('smtp_return: Encryption successful') config['smtp.content'] = six.text_type(encrypted_data) else: log.error( 'SMTP: Encryption failed, only an error message will be sent' ) config['smtp.content'] = ( 'Encryption failed, the return data was not sent.' '\r\n\r\n{0}\r\n{1}' ).format(encrypted_data.status, encrypted_data.stderr) message = ( 'From: {0}\r\n' 'To: {1}\r\n' 'Date: {2}\r\n' 'Subject: {3}\r\n' '\r\n' '{4}' ).format( config['smtp.from'], config['smtp.to'], formatdate(localtime=True), config['smtp.subject'], config['smtp.content'], ) log.debug('smtp_return: Connecting to the server...') server = smtplib.SMTP(config['smtp.host'], int(config['smtp.port'])) if config['smtp.tls'] is True: server.starttls() log.debug('smtp_return: TLS enabled') if config['smtp.username'] and config['smtp.password']: server.login(config['smtp.username'], config['smtp.password']) log.debug('smtp_return: Authenticated') server.sendmail(config['smtp.from'], config['smtp.to'], message) log.debug('smtp_return: Message sent.') server.quit()
Send an email with the data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/smtp.py#L50-L127
null
# -*- coding: utf-8 -*- ''' Return salt data via email The following fields can be set in the minion conf file: smtp.from (required) smtp.to (required) smtp.host (required) smtp.port (optional, defaults to 25) smtp.username (optional) smtp.password (optional) smtp.tls (optional, defaults to False) smtp.subject (optional, but helpful) smtp.gpgowne' (optional) smtp.fields (optional) smtp.content (optional) There are a few things to keep in mind: * If a username is used, a password is also required. It is recommended (but not required) to use the TLS setting when authenticating. * You should at least declare a subject, but you don't have to. * The use of encryption, i.e. setting gpgowner in your settings, requires python-gnupg to be installed. * The field gpgowner specifies a user's ~/.gpg directory. This must contain a gpg public key matching the address the mail is sent to. If left unset, no encryption will be used. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os import logging import smtplib from email.utils import formatdate from salt.ext import six try: import gnupg HAS_GNUPG = True except ImportError: HAS_GNUPG = False log = logging.getLogger(__name__)
saltstack/salt
salt/modules/netaddress.py
list_cidr_ips
python
def list_cidr_ips(cidr): ''' Get a list of IP addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip) for ip in list(ips)]
Get a list of IP addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips 192.168.0.0/20
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netaddress.py#L35-L44
null
# -*- coding: utf-8 -*- ''' Module for getting information about network addresses. .. versionadded:: 2016.3.0 :depends: netaddr ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six __virtualname__ = 'netaddress' # Import third party libs try: import netaddr HAS_NETADDR = True except ImportError as e: HAS_NETADDR = False def __virtual__(): ''' Only load if netaddr library exist. ''' if not HAS_NETADDR: return (False, 'The netaddress execution module cannot be loaded: ' 'netaddr python library is not installed.') return __virtualname__ def list_cidr_ips_ipv6(cidr): ''' Get a list of IPv6 addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip.ipv6()) for ip in list(ips)] def cidr_netmask(cidr): ''' Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.netmask) def cidr_broadcast(cidr): ''' Get the broadcast address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.broadcast)
saltstack/salt
salt/modules/netaddress.py
list_cidr_ips_ipv6
python
def list_cidr_ips_ipv6(cidr): ''' Get a list of IPv6 addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip.ipv6()) for ip in list(ips)]
Get a list of IPv6 addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netaddress.py#L47-L56
null
# -*- coding: utf-8 -*- ''' Module for getting information about network addresses. .. versionadded:: 2016.3.0 :depends: netaddr ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six __virtualname__ = 'netaddress' # Import third party libs try: import netaddr HAS_NETADDR = True except ImportError as e: HAS_NETADDR = False def __virtual__(): ''' Only load if netaddr library exist. ''' if not HAS_NETADDR: return (False, 'The netaddress execution module cannot be loaded: ' 'netaddr python library is not installed.') return __virtualname__ def list_cidr_ips(cidr): ''' Get a list of IP addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip) for ip in list(ips)] def cidr_netmask(cidr): ''' Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.netmask) def cidr_broadcast(cidr): ''' Get the broadcast address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.broadcast)
saltstack/salt
salt/modules/netaddress.py
cidr_netmask
python
def cidr_netmask(cidr): ''' Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.netmask)
Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netaddress.py#L59-L68
null
# -*- coding: utf-8 -*- ''' Module for getting information about network addresses. .. versionadded:: 2016.3.0 :depends: netaddr ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six __virtualname__ = 'netaddress' # Import third party libs try: import netaddr HAS_NETADDR = True except ImportError as e: HAS_NETADDR = False def __virtual__(): ''' Only load if netaddr library exist. ''' if not HAS_NETADDR: return (False, 'The netaddress execution module cannot be loaded: ' 'netaddr python library is not installed.') return __virtualname__ def list_cidr_ips(cidr): ''' Get a list of IP addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip) for ip in list(ips)] def list_cidr_ips_ipv6(cidr): ''' Get a list of IPv6 addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip.ipv6()) for ip in list(ips)] def cidr_broadcast(cidr): ''' Get the broadcast address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.broadcast)
saltstack/salt
salt/modules/netaddress.py
cidr_broadcast
python
def cidr_broadcast(cidr): ''' Get the broadcast address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.broadcast)
Get the broadcast address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netaddress.py#L71-L80
null
# -*- coding: utf-8 -*- ''' Module for getting information about network addresses. .. versionadded:: 2016.3.0 :depends: netaddr ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six __virtualname__ = 'netaddress' # Import third party libs try: import netaddr HAS_NETADDR = True except ImportError as e: HAS_NETADDR = False def __virtual__(): ''' Only load if netaddr library exist. ''' if not HAS_NETADDR: return (False, 'The netaddress execution module cannot be loaded: ' 'netaddr python library is not installed.') return __virtualname__ def list_cidr_ips(cidr): ''' Get a list of IP addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip) for ip in list(ips)] def list_cidr_ips_ipv6(cidr): ''' Get a list of IPv6 addresses from a CIDR. CLI example:: salt myminion netaddress.list_cidr_ips_ipv6 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return [six.text_type(ip.ipv6()) for ip in list(ips)] def cidr_netmask(cidr): ''' Get the netmask address associated with a CIDR address. CLI example:: salt myminion netaddress.cidr_netmask 192.168.0.0/20 ''' ips = netaddr.IPNetwork(cidr) return six.text_type(ips.netmask)
saltstack/salt
salt/modules/openstack_mng.py
restart_service
python
def restart_service(service_name, minimum_running_time=None): ''' Restart OpenStack service immediately, or only if it's running longer than specified value CLI Example: .. code-block:: bash salt '*' openstack_mng.restart_service neutron salt '*' openstack_mng.restart_service neutron minimum_running_time=600 ''' if minimum_running_time: ret_code = False # get system services list for interesting openstack service services = __salt__['cmd.run'](['/usr/bin/openstack-service', 'list', service_name]).split('\n') for service in services: service_info = __salt__['service.show'](service) with salt.utils.files.fopen('/proc/uptime') as rfh: boot_time = float( salt.utils.stringutils.to_unicode( rfh.read() ).split(' ')[0] ) expr_time = int(service_info.get('ExecMainStartTimestampMonotonic', 0)) / 1000000 < boot_time - minimum_running_time expr_active = service_info.get('ActiveState') == "active" if expr_time or not expr_active: # restart specific system service ret = __salt__['service.restart'](service) if ret: ret_code = True return ret_code else: # just restart os_cmd = ['/usr/bin/openstack-service', 'restart', service_name] return __salt__['cmd.retcode'](os_cmd) == 0
Restart OpenStack service immediately, or only if it's running longer than specified value CLI Example: .. code-block:: bash salt '*' openstack_mng.restart_service neutron salt '*' openstack_mng.restart_service neutron minimum_running_time=600
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openstack_mng.py#L65-L104
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n" ]
# -*- coding: utf-8 -*- ''' Module for OpenStack Management :codeauthor: Konrad Mosoń <mosonkonrad@gmail.com> :maturity: new :depends: openstack-utils :platform: linux ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import logging import os.path # Import salt libs import salt.utils.files import salt.utils.stringutils log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'openstack_mng' def __virtual__(): ''' Only load this module if openstack-service is installed ''' if os.path.isfile('/usr/bin/openstack-service'): return __virtualname__ else: return (False, 'The openstack-service binary could not be found.') def start_service(service_name): ''' Start OpenStack service immediately CLI Example: .. code-block:: bash salt '*' openstack_mng.start_service neutron ''' os_cmd = ['/usr/bin/openstack-service', 'start', service_name] return __salt__['cmd.retcode'](os_cmd) == 0 def stop_service(service_name): ''' Stop OpenStack service immediately CLI Example: .. code-block:: bash salt '*' openstack_mng.stop_service neutron ''' os_cmd = ['/usr/bin/openstack-service', 'stop', service_name] return __salt__['cmd.retcode'](os_cmd) == 0
saltstack/salt
salt/returners/mysql.py
_get_options
python
def _get_options(ret=None): ''' Returns options used for the MySQL connection. ''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options
Returns options used for the MySQL connection.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L190-L227
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
_get_serv
python
def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK")
Return a mysql cursor
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L231-L286
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
returner
python
def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.')
Return data to a mysql server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L289-L311
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument\n '''\n Do any work necessary to prepare a JID, including sending a custom id\n '''\n return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)\n", "def save_load(jid, load, minions=None):\n '''\n Save the load to the specified jid id\n '''\n with _get_serv(commit=True) as cur:\n\n sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)'''\n\n try:\n cur.execute(sql, (jid, salt.utils.json.dumps(load)))\n except MySQLdb.IntegrityError:\n # https://github.com/saltstack/salt/issues/22171\n # Without this try/except we get tons of duplicate entry errors\n # which result in job returns not being stored properly\n pass\n" ]
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
event_return
python
def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. ''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id']))
Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L314-L327
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. 
MySQL server unavailable.') def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], 
salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
save_load
python
def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass
Save the load to the specified jid id
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L330-L344
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool 
filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
get_load
python
def get_load(jid):
    '''
    Fetch the job load that was stored for the given job id.

    :param jid: the job id whose load record should be retrieved
    :return: the deserialized load dict, or an empty dict when no row exists
    '''
    with _get_serv(ret=None, commit=True) as cur:
        sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;'''
        cur.execute(sql, (jid,))
        row = cur.fetchone()
        # No matching jid row means there is nothing to deserialize
        if not row:
            return {}
        return salt.utils.json.loads(row[0])
Return the load data that marks a specified jid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L354-L365
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def 
get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
get_jid
python
def get_jid(jid):
    '''
    Return the information returned when the specified job id was executed

    :param jid: the job id to look up
    :return: dict mapping each minion id to its deserialized full return
    '''
    with _get_serv(ret=None, commit=True) as cur:
        sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s'''
        cur.execute(sql, (jid,))
        rows = cur.fetchall()
        # An empty result set naturally yields an empty dict
        return {minion: salt.utils.json.loads(full_ret)
                for minion, full_ret in rows}
Return the information returned when the specified job id was executed
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L368-L383
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids 
:param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
get_fun
python
def get_fun(fun):
    '''
    Return a dict of the last function called for all minions.

    :param fun: name of the salt function to look up (e.g. ``test.ping``)
    :return: mapping of minion id -> deserialized full return of the most
             recent job in which that minion ran ``fun``
    '''
    # For each (fun, id) pair keep only the newest jid, then restrict to
    # the requested function name.
    query = '''SELECT s.id, s.jid, s.full_ret
            FROM `salt_returns` s
            JOIN ( SELECT MAX(`jid`) as jid
                from `salt_returns` GROUP BY fun, id) max
            ON s.jid = max.jid
            WHERE s.fun = %s
            '''
    with _get_serv(ret=None, commit=True) as cur:
        cur.execute(query, (fun,))
        rows = cur.fetchall()
        # Empty result set yields an empty dict, same as the guard-style
        # original.
        return {minion: salt.utils.json.loads(full_ret)
                for minion, _, full_ret in rows}
Return a dict of the last function called for all minions
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L386-L407
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: 
filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
get_jids_filter
python
def get_jids_filter(count, filter_find_job=True):
    '''
    Return a list of all job ids

    :param int count: show not more than the count of most recent jobs
    :param bool filter_find_job: filter out 'saltutil.find_job' jobs
    :return: list of formatted jid instances, oldest first
    '''
    with _get_serv(ret=None, commit=True) as cur:
        sql = '''SELECT * FROM (
                     SELECT DISTINCT `jid` ,`load` FROM `jids`
                     {0}
                     ORDER BY `jid` DESC limit {1}
                 ) `tmp`
                 ORDER BY `jid`;'''
        where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' '''

        # MySQL cannot bind the LIMIT operand through a format placeholder
        # here, so the value is interpolated directly into the statement.
        # Coerce it to int first so a malicious or malformed string cannot
        # be injected into the SQL text.
        cur.execute(sql.format(where if filter_find_job else '', int(count)))
        data = cur.fetchall()
        ret = []
        for jid in data:
            ret.append(salt.utils.jid.format_jid_instance_ext(
                jid[0],
                salt.utils.json.loads(jid[1])))
        return ret
Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_job: filter out 'saltutil.find_job' jobs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L429-L452
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def format_jid_instance_ext(jid, job):\n '''\n Format the jid correctly with jid included\n '''\n ret = format_job_instance(job)\n ret.update({\n 'JID': jid,\n 'StartTime': jid_to_time(jid)})\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup 
tables, then purge rows. :param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. 
Archives and/or deletes the events and job details from the database. :return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
get_minions
python
def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret
Return a list of minions
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L455-L469
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
_purge_jobs
python
def _purge_jobs(timestamp): ''' Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True
Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L479-L513
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp) def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. 
:return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
_archive_jobs
python
def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. :param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp)
Copy rows to a set of backup tables, then purge rows. :param timestamp: Archive rows older than this timestamp :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L516-L570
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. :return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
saltstack/salt
salt/returners/mysql.py
clean_old_jobs
python
def clean_old_jobs(): ''' Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. :return: ''' if __opts__.get('keep_jobs', False) and int(__opts__.get('keep_jobs', 0)) > 0: try: with _get_serv() as cur: sql = 'select date_sub(now(), interval {0} hour) as stamp;'.format(__opts__['keep_jobs']) cur.execute(sql) rows = cur.fetchall() stamp = rows[0][0] if __opts__.get('archive_jobs', False): _archive_jobs(stamp) else: _purge_jobs(stamp) except MySQLdb.Error as e: log.error('Mysql returner was unable to get timestamp for purge/archive of jobs') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e))
Called in the master's event loop every loop_interval. Archives and/or deletes the events and job details from the database. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/mysql.py#L573-L594
null
# -*- coding: utf-8 -*- ''' Return data to a mysql server :maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com> :maturity: mature :depends: python-mysqldb :platform: all To enable this returner, the minion will need the python client for mysql installed and the following values configured in the minion or master config. These are the defaults: .. code-block:: yaml mysql.host: 'salt' mysql.user: 'salt' mysql.pass: 'salt' mysql.db: 'salt' mysql.port: 3306 SSL is optional. The defaults are set to None. If you do not want to use SSL, either exclude these options or set them to None. .. code-block:: yaml mysql.ssl_ca: None mysql.ssl_cert: None mysql.ssl_key: None Alternative configuration values can be used by prefacing the configuration with `alternative.`. Any values not found in the alternative configuration will be pulled from the default location. As stated above, SSL configuration is optional. The following ssl options are simply for illustration purposes: .. code-block:: yaml alternative.mysql.host: 'salt' alternative.mysql.user: 'salt' alternative.mysql.pass: 'salt' alternative.mysql.db: 'salt' alternative.mysql.port: 3306 alternative.mysql.ssl_ca: '/etc/pki/mysql/certs/localhost.pem' alternative.mysql.ssl_cert: '/etc/pki/mysql/certs/localhost.crt' alternative.mysql.ssl_key: '/etc/pki/mysql/certs/localhost.key' Should you wish the returner data to be cleaned out every so often, set `keep_jobs` to the number of hours for the jobs to live in the tables. Setting it to `0` or leaving it unset will cause the data to stay in the tables. Should you wish to archive jobs in a different table for later processing, set `archive_jobs` to True. Salt will create 3 archive tables - `jids_archive` - `salt_returns_archive` - `salt_events_archive` and move the contents of `jids`, `salt_returns`, and `salt_events` that are more than `keep_jobs` hours old to these tables. Use the following mysql database schema: .. 
code-block:: sql CREATE DATABASE `salt` DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci; USE `salt`; -- -- Table structure for table `jids` -- DROP TABLE IF EXISTS `jids`; CREATE TABLE `jids` ( `jid` varchar(255) NOT NULL, `load` mediumtext NOT NULL, UNIQUE KEY `jid` (`jid`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE INDEX jid ON jids(jid) USING BTREE; -- -- Table structure for table `salt_returns` -- DROP TABLE IF EXISTS `salt_returns`; CREATE TABLE `salt_returns` ( `fun` varchar(50) NOT NULL, `jid` varchar(255) NOT NULL, `return` mediumtext NOT NULL, `id` varchar(255) NOT NULL, `success` varchar(10) NOT NULL, `full_ret` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, KEY `id` (`id`), KEY `jid` (`jid`), KEY `fun` (`fun`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- -- Table structure for table `salt_events` -- DROP TABLE IF EXISTS `salt_events`; CREATE TABLE `salt_events` ( `id` BIGINT NOT NULL AUTO_INCREMENT, `tag` varchar(255) NOT NULL, `data` mediumtext NOT NULL, `alter_time` TIMESTAMP DEFAULT CURRENT_TIMESTAMP, `master_id` varchar(255) NOT NULL, PRIMARY KEY (`id`), KEY `tag` (`tag`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; Required python modules: MySQLdb To use the mysql returner, append '--return mysql' to the salt command. .. code-block:: bash salt '*' test.ping --return mysql To use the alternative configuration, append '--return_config alternative' to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return mysql --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. 
code-block:: bash salt '*' test.ping --return mysql --return_kwargs '{"db": "another-salt"}' ''' from __future__ import absolute_import, print_function, unicode_literals # Let's not allow PyLint complain about string substitution # pylint: disable=W1321,E1321 # Import python libs from contextlib import contextmanager import sys import logging # Import salt libs import salt.returners import salt.utils.jid import salt.utils.json import salt.exceptions # Import 3rd-party libs from salt.ext import six try: # Trying to import MySQLdb import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.connections import OperationalError except ImportError: try: # MySQLdb import failed, try to import PyMySQL import pymysql pymysql.install_as_MySQLdb() import MySQLdb import MySQLdb.cursors import MySQLdb.converters from MySQLdb.err import OperationalError except ImportError: MySQLdb = None log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'mysql' def __virtual__(): ''' Confirm that a python mysql client is installed. ''' return bool(MySQLdb), 'No python mysql client installed.' if MySQLdb is None else '' def _get_options(ret=None): ''' Returns options used for the MySQL connection. 
''' defaults = {'host': 'salt', 'user': 'salt', 'pass': 'salt', 'db': 'salt', 'port': 3306, 'ssl_ca': None, 'ssl_cert': None, 'ssl_key': None} attrs = {'host': 'host', 'user': 'user', 'pass': 'pass', 'db': 'db', 'port': 'port', 'ssl_ca': 'ssl_ca', 'ssl_cert': 'ssl_cert', 'ssl_key': 'ssl_key'} _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) # post processing for k, v in six.iteritems(_options): if isinstance(v, six.string_types) and v.lower() == 'none': # Ensure 'None' is rendered as None _options[k] = None if k == 'port': # Ensure port is an int _options[k] = int(v) return _options @contextmanager def _get_serv(ret=None, commit=False): ''' Return a mysql cursor ''' _options = _get_options(ret) connect = True if __context__ and 'mysql_returner_conn' in __context__: try: log.debug('Trying to reuse MySQL connection pool') conn = __context__['mysql_returner_conn'] conn.ping() connect = False except OperationalError as exc: log.debug('OperationalError on ping: %s', exc) if connect: log.debug('Generating new MySQL connection pool') try: # An empty ssl_options dictionary passed to MySQLdb.connect will # effectively connect w/o SSL. 
ssl_options = {} if _options.get('ssl_ca'): ssl_options['ca'] = _options.get('ssl_ca') if _options.get('ssl_cert'): ssl_options['cert'] = _options.get('ssl_cert') if _options.get('ssl_key'): ssl_options['key'] = _options.get('ssl_key') conn = MySQLdb.connect(host=_options.get('host'), user=_options.get('user'), passwd=_options.get('pass'), db=_options.get('db'), port=_options.get('port'), ssl=ssl_options) try: __context__['mysql_returner_conn'] = conn except TypeError: pass except OperationalError as exc: raise salt.exceptions.SaltMasterError('MySQL returner could not connect to database: {exc}'.format(exc=exc)) cursor = conn.cursor() try: yield cursor except MySQLdb.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err else: if commit: cursor.execute("COMMIT") else: cursor.execute("ROLLBACK") def returner(ret): ''' Return data to a mysql server ''' # if a minion is returning a standalone job, get a jobid if ret['jid'] == 'req': ret['jid'] = prep_jid(nocache=ret.get('nocache', False)) save_load(ret['jid'], ret) try: with _get_serv(ret, commit=True) as cur: sql = '''INSERT INTO `salt_returns` (`fun`, `jid`, `return`, `id`, `success`, `full_ret`) VALUES (%s, %s, %s, %s, %s, %s)''' cur.execute(sql, (ret['fun'], ret['jid'], salt.utils.json.dumps(ret['return']), ret['id'], ret.get('success', False), salt.utils.json.dumps(ret))) except salt.exceptions.SaltMasterError as exc: log.critical(exc) log.critical('Could not store return with MySQL returner. MySQL server unavailable.') def event_return(events): ''' Return event to mysql server Requires that configuration be enabled via 'event_return' option in master config. 
''' with _get_serv(events, commit=True) as cur: for event in events: tag = event.get('tag', '') data = event.get('data', '') sql = '''INSERT INTO `salt_events` (`tag`, `data`, `master_id`) VALUES (%s, %s, %s)''' cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id'])) def save_load(jid, load, minions=None): ''' Save the load to the specified jid id ''' with _get_serv(commit=True) as cur: sql = '''INSERT INTO `jids` (`jid`, `load`) VALUES (%s, %s)''' try: cur.execute(sql, (jid, salt.utils.json.dumps(load))) except MySQLdb.IntegrityError: # https://github.com/saltstack/salt/issues/22171 # Without this try/except we get tons of duplicate entry errors # which result in job returns not being stored properly pass def save_minions(jid, minions, syndic_id=None): # pylint: disable=unused-argument ''' Included for API consistency ''' pass def get_load(jid): ''' Return the load data that marks a specified jid ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT `load` FROM `jids` WHERE `jid` = %s;''' cur.execute(sql, (jid,)) data = cur.fetchone() if data: return salt.utils.json.loads(data[0]) return {} def get_jid(jid): ''' Return the information returned when the specified job id was executed ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT id, full_ret FROM `salt_returns` WHERE `jid` = %s''' cur.execute(sql, (jid,)) data = cur.fetchall() ret = {} if data: for minion, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_fun(fun): ''' Return a dict of the last function called for all minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT s.id,s.jid, s.full_ret FROM `salt_returns` s JOIN ( SELECT MAX(`jid`) as jid from `salt_returns` GROUP BY fun, id) max ON s.jid = max.jid WHERE s.fun = %s ''' cur.execute(sql, (fun,)) data = cur.fetchall() ret = {} if data: for minion, _, full_ret in data: ret[minion] = salt.utils.json.loads(full_ret) return ret def get_jids(): ''' Return a list of all 
job ids ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT `jid`, `load` FROM `jids`''' cur.execute(sql) data = cur.fetchall() ret = {} for jid in data: ret[jid[0]] = salt.utils.jid.format_jid_instance( jid[0], salt.utils.json.loads(jid[1])) return ret def get_jids_filter(count, filter_find_job=True): ''' Return a list of all job ids :param int count: show not more than the count of most recent jobs :param bool filter_find_jobs: filter out 'saltutil.find_job' jobs ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT * FROM ( SELECT DISTINCT `jid` ,`load` FROM `jids` {0} ORDER BY `jid` DESC limit {1} ) `tmp` ORDER BY `jid`;''' where = '''WHERE `load` NOT LIKE '%"fun": "saltutil.find_job"%' ''' cur.execute(sql.format(where if filter_find_job else '', count)) data = cur.fetchall() ret = [] for jid in data: ret.append(salt.utils.jid.format_jid_instance_ext( jid[0], salt.utils.json.loads(jid[1]))) return ret def get_minions(): ''' Return a list of minions ''' with _get_serv(ret=None, commit=True) as cur: sql = '''SELECT DISTINCT id FROM `salt_returns`''' cur.execute(sql) data = cur.fetchall() ret = [] for minion in data: ret.append(minion[0]) return ret def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__) def _purge_jobs(timestamp): ''' Purge records from the returner tables. 
:param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cur: try: sql = 'delete from `jids` where jid in (select distinct jid from salt_returns where alter_time < %s)' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_returns` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'delete from `salt_events` where alter_time < %s' cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to delete contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return True def _archive_jobs(timestamp): ''' Copy rows to a set of backup tables, then purge rows. 
:param timestamp: Archive rows older than this timestamp :return: ''' source_tables = ['jids', 'salt_returns', 'salt_events'] with _get_serv() as cur: target_tables = {} for table_name in source_tables: try: tmp_table_name = table_name + '_archive' sql = 'create table if not exists {0} like {1}'.format(tmp_table_name, table_name) cur.execute(sql) cur.execute('COMMIT') target_tables[table_name] = tmp_table_name except MySQLdb.Error as e: log.error('mysql returner archiver was unable to create the archive tables.') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where jid in (select distinct jid from salt_returns where alter_time < %s)'.format(target_tables['jids'], 'jids') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'jids\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) except Exception as e: log.error(e) raise try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_returns'], 'salt_returns') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_returns\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) try: sql = 'insert into `{0}` select * from `{1}` where alter_time < %s'.format(target_tables['salt_events'], 'salt_events') cur.execute(sql, (timestamp,)) cur.execute('COMMIT') except MySQLdb.Error as e: log.error('mysql returner archiver was unable to copy contents of table \'salt_events\'') log.error(six.text_type(e)) raise salt.exceptions.SaltRunnerError(six.text_type(e)) return _purge_jobs(timestamp)
saltstack/salt
salt/returners/syslog_return.py
_verify_options
python
def _verify_options(options): ''' Verify options and log warnings Returns True if all options can be verified, otherwise False ''' # sanity check all vals used for bitwise operations later bitwise_args = [('level', options['level']), ('facility', options['facility']) ] bitwise_args.extend([('option', x) for x in options['options']]) for opt_name, opt in bitwise_args: if not hasattr(syslog, opt): log.error('syslog has no attribute %s', opt) return False if not isinstance(getattr(syslog, opt), int): log.error('%s is not a valid syslog %s', opt, opt_name) return False # Sanity check tag if 'tag' in options: if not isinstance(options['tag'], six.string_types): log.error('tag must be a string') return False if len(options['tag']) > 32: log.error('tag size is limited to 32 characters') return False return True
Verify options and log warnings Returns True if all options can be verified, otherwise False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/syslog_return.py#L135-L166
null
# -*- coding: utf-8 -*- ''' Return data to the host operating system's syslog facility To use the syslog returner, append '--return syslog' to the salt command. .. code-block:: bash salt '*' test.ping --return syslog The following fields can be set in the minion conf file:: syslog.level (optional, Default: LOG_INFO) syslog.facility (optional, Default: LOG_USER) syslog.tag (optional, Default: salt-minion) syslog.options (list, optional, Default: []) Available levels, facilities, and options can be found in the ``syslog`` docs for your python version. .. note:: The default tag comes from ``sys.argv[0]`` which is usually "salt-minion" but could be different based on the specific environment. Configuration example: .. code-block:: yaml syslog.level: 'LOG_ERR' syslog.facility: 'LOG_DAEMON' syslog.tag: 'mysalt' syslog.options: - LOG_PID Of course you can also nest the options: .. code-block:: yaml syslog: level: 'LOG_ERR' facility: 'LOG_DAEMON' tag: 'mysalt' options: - LOG_PID Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location: .. code-block:: yaml alternative.syslog.level: 'LOG_WARN' alternative.syslog.facility: 'LOG_NEWS' To use the alternative configuration, append ``--return_config alternative`` to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return syslog --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return syslog --return_kwargs '{"level": "LOG_DEBUG"}' .. note:: Syslog server implementations may have limits on the maximum record size received by the client. This may lead to job return data being truncated in the syslog server's logs. 
For example, for rsyslog on RHEL-based systems, the default maximum record size is approximately 2KB (which return data can easily exceed). This is configurable in rsyslog.conf via the $MaxMessageSize config parameter. Please consult your syslog implmentation's documentation to determine how to adjust this limit. ''' from __future__ import absolute_import, print_function, unicode_literals import logging # Import python libs try: import syslog HAS_SYSLOG = True except ImportError: HAS_SYSLOG = False # Import Salt libs import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'syslog' def _get_options(ret=None): ''' Get the returner options from salt. ''' defaults = {'level': 'LOG_INFO', 'facility': 'LOG_USER', 'options': [] } attrs = {'level': 'level', 'facility': 'facility', 'tag': 'tag', 'options': 'options' } _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) return _options def __virtual__(): if not HAS_SYSLOG: return False, 'Could not import syslog returner; syslog is not installed.' 
return __virtualname__ def returner(ret): ''' Return data to the local syslog ''' _options = _get_options(ret) if not _verify_options(_options): return # Get values from syslog module level = getattr(syslog, _options['level']) facility = getattr(syslog, _options['facility']) # parse for syslog options logoption = 0 for opt in _options['options']: logoption = logoption | getattr(syslog, opt) # Open syslog correctly based on options and tag if 'tag' in _options: syslog.openlog(ident=salt.utils.stringutils.to_str(_options['tag']), logoption=logoption) else: syslog.openlog(logoption=logoption) # Send log of given level and facility syslog.syslog(facility | level, salt.utils.json.dumps(ret)) # Close up to reset syslog to defaults syslog.closelog() def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
saltstack/salt
salt/returners/syslog_return.py
returner
python
def returner(ret): ''' Return data to the local syslog ''' _options = _get_options(ret) if not _verify_options(_options): return # Get values from syslog module level = getattr(syslog, _options['level']) facility = getattr(syslog, _options['facility']) # parse for syslog options logoption = 0 for opt in _options['options']: logoption = logoption | getattr(syslog, opt) # Open syslog correctly based on options and tag if 'tag' in _options: syslog.openlog(ident=salt.utils.stringutils.to_str(_options['tag']), logoption=logoption) else: syslog.openlog(logoption=logoption) # Send log of given level and facility syslog.syslog(facility | level, salt.utils.json.dumps(ret)) # Close up to reset syslog to defaults syslog.closelog()
Return data to the local syslog
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/returners/syslog_return.py#L175-L204
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def to_str(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # 
other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n", "def _get_options(ret=None):\n '''\n Get the returner options from salt.\n '''\n\n defaults = {'level': 'LOG_INFO',\n 'facility': 'LOG_USER',\n 'options': []\n }\n\n attrs = {'level': 'level',\n 'facility': 'facility',\n 'tag': 'tag',\n 'options': 'options'\n }\n\n _options = salt.returners.get_returner_options(__virtualname__,\n ret,\n attrs,\n __salt__=__salt__,\n __opts__=__opts__,\n defaults=defaults)\n return _options\n", "def _verify_options(options):\n '''\n Verify options and log warnings\n\n Returns True if all options can be verified,\n otherwise False\n '''\n\n # sanity check all vals used for bitwise operations later\n bitwise_args = [('level', options['level']),\n ('facility', options['facility'])\n ]\n bitwise_args.extend([('option', x) for x in options['options']])\n\n for opt_name, opt in bitwise_args:\n if not hasattr(syslog, opt):\n log.error('syslog has no attribute %s', opt)\n return False\n if not isinstance(getattr(syslog, opt), int):\n log.error('%s is not a valid syslog %s', opt, opt_name)\n return False\n\n # Sanity check tag\n if 'tag' in options:\n if not isinstance(options['tag'], six.string_types):\n log.error('tag must be a string')\n return False\n if len(options['tag']) > 32:\n log.error('tag size is limited to 32 
characters')\n return False\n\n return True\n" ]
# -*- coding: utf-8 -*- ''' Return data to the host operating system's syslog facility To use the syslog returner, append '--return syslog' to the salt command. .. code-block:: bash salt '*' test.ping --return syslog The following fields can be set in the minion conf file:: syslog.level (optional, Default: LOG_INFO) syslog.facility (optional, Default: LOG_USER) syslog.tag (optional, Default: salt-minion) syslog.options (list, optional, Default: []) Available levels, facilities, and options can be found in the ``syslog`` docs for your python version. .. note:: The default tag comes from ``sys.argv[0]`` which is usually "salt-minion" but could be different based on the specific environment. Configuration example: .. code-block:: yaml syslog.level: 'LOG_ERR' syslog.facility: 'LOG_DAEMON' syslog.tag: 'mysalt' syslog.options: - LOG_PID Of course you can also nest the options: .. code-block:: yaml syslog: level: 'LOG_ERR' facility: 'LOG_DAEMON' tag: 'mysalt' options: - LOG_PID Alternative configuration values can be used by prefacing the configuration. Any values not found in the alternative configuration will be pulled from the default location: .. code-block:: yaml alternative.syslog.level: 'LOG_WARN' alternative.syslog.facility: 'LOG_NEWS' To use the alternative configuration, append ``--return_config alternative`` to the salt command. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' test.ping --return syslog --return_config alternative To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command. .. versionadded:: 2016.3.0 .. code-block:: bash salt '*' test.ping --return syslog --return_kwargs '{"level": "LOG_DEBUG"}' .. note:: Syslog server implementations may have limits on the maximum record size received by the client. This may lead to job return data being truncated in the syslog server's logs. 
For example, for rsyslog on RHEL-based systems, the default maximum record size is approximately 2KB (which return data can easily exceed). This is configurable in rsyslog.conf via the $MaxMessageSize config parameter. Please consult your syslog implmentation's documentation to determine how to adjust this limit. ''' from __future__ import absolute_import, print_function, unicode_literals import logging # Import python libs try: import syslog HAS_SYSLOG = True except ImportError: HAS_SYSLOG = False # Import Salt libs import salt.utils.jid import salt.utils.json import salt.returners from salt.ext import six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'syslog' def _get_options(ret=None): ''' Get the returner options from salt. ''' defaults = {'level': 'LOG_INFO', 'facility': 'LOG_USER', 'options': [] } attrs = {'level': 'level', 'facility': 'facility', 'tag': 'tag', 'options': 'options' } _options = salt.returners.get_returner_options(__virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__, defaults=defaults) return _options def _verify_options(options): ''' Verify options and log warnings Returns True if all options can be verified, otherwise False ''' # sanity check all vals used for bitwise operations later bitwise_args = [('level', options['level']), ('facility', options['facility']) ] bitwise_args.extend([('option', x) for x in options['options']]) for opt_name, opt in bitwise_args: if not hasattr(syslog, opt): log.error('syslog has no attribute %s', opt) return False if not isinstance(getattr(syslog, opt), int): log.error('%s is not a valid syslog %s', opt, opt_name) return False # Sanity check tag if 'tag' in options: if not isinstance(options['tag'], six.string_types): log.error('tag must be a string') return False if len(options['tag']) > 32: log.error('tag size is limited to 32 characters') return False return True def __virtual__(): if not HAS_SYSLOG: return False, 'Could not import syslog returner; 
syslog is not installed.' return __virtualname__ def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument ''' Do any work necessary to prepare a JID, including sending a custom id ''' return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid(__opts__)
saltstack/salt
salt/utils/parsers.py
OptionParser.error
python
def error(self, msg): ''' error(msg : string) Print a usage message incorporating 'msg' to stderr and exit. This keeps option parsing exit status uniform for all parsing errors. ''' self.print_usage(sys.stderr) self.exit(salt.defaults.exitcodes.EX_USAGE, '{0}: error: {1}\n'.format(self.get_prog_name(), msg))
error(msg : string) Print a usage message incorporating 'msg' to stderr and exit. This keeps option parsing exit status uniform for all parsing errors.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/parsers.py#L279-L287
null
class OptionParser(optparse.OptionParser, object): VERSION = version.__saltstack_version__.formatted_version usage = '%prog [options]' epilog = ('You can find additional help about %prog issuing "man %prog" ' 'or on http://docs.saltstack.com') description = None # Private attributes _mixin_prio_ = 100 # Setup multiprocessing logging queue listener _setup_mp_logging_listener_ = False def __init__(self, *args, **kwargs): kwargs.setdefault('version', '%prog {0}'.format(self.VERSION)) kwargs.setdefault('usage', self.usage) if self.description: kwargs.setdefault('description', self.description) if self.epilog: kwargs.setdefault('epilog', self.epilog) kwargs.setdefault('option_class', CustomOption) optparse.OptionParser.__init__(self, *args, **kwargs) if self.epilog and '%prog' in self.epilog: self.epilog = self.epilog.replace('%prog', self.get_prog_name()) def add_option_group(self, *args, **kwargs): option_group = optparse.OptionParser.add_option_group(self, *args, **kwargs) option_group.option_class = CustomOption return option_group def parse_args(self, args=None, values=None): options, args = optparse.OptionParser.parse_args(self, args, values) if 'args_stdin' in options.__dict__ and options.args_stdin is True: # Read additional options and/or arguments from stdin and combine # them with the options and arguments from the command line. new_inargs = sys.stdin.readlines() new_inargs = [arg.rstrip('\r\n') for arg in new_inargs] new_options, new_args = optparse.OptionParser.parse_args( self, new_inargs) options.__dict__.update(new_options.__dict__) args.extend(new_args) if six.PY2: args = salt.utils.data.decode(args) if options.versions_report: self.print_versions_report() self.options, self.args = options, args # Let's get some proper sys.stderr logging as soon as possible!!! # This logging handler will be removed once the proper console or # logfile logging is setup. 
temp_log_level = getattr(self.options, 'log_level', None) log.setup_temp_logger( 'error' if temp_log_level is None else temp_log_level ) # Gather and run the process_<option> functions in the proper order process_option_funcs = [] for option_key in options.__dict__: process_option_func = getattr( self, 'process_{0}'.format(option_key), None ) if process_option_func is not None: process_option_funcs.append(process_option_func) for process_option_func in _sorted(process_option_funcs): try: process_option_func() except Exception as err: # pylint: disable=broad-except logger.exception(err) self.error( 'Error while processing {0}: {1}'.format( process_option_func, traceback.format_exc(err) ) ) # Run the functions on self._mixin_after_parsed_funcs for mixin_after_parsed_func in self._mixin_after_parsed_funcs: # pylint: disable=no-member try: mixin_after_parsed_func(self) except Exception as err: # pylint: disable=broad-except logger.exception(err) self.error( 'Error while processing {0}: {1}'.format( mixin_after_parsed_func, traceback.format_exc(err) ) ) if self.config.get('conf_file', None) is not None: # pylint: disable=no-member logger.debug( 'Configuration file path: %s', self.config['conf_file'] # pylint: disable=no-member ) # Retain the standard behavior of optparse to return options and args return options, args def _populate_option_list(self, option_list, add_help=True): optparse.OptionParser._populate_option_list( self, option_list, add_help=add_help ) for mixin_setup_func in self._mixin_setup_funcs: # pylint: disable=no-member mixin_setup_func(self) def _add_version_option(self): optparse.OptionParser._add_version_option(self) self.add_option( '--versions-report', '-V', action='store_true', help='Show program\'s dependencies version number and exit.' 
) def print_versions_report(self, file=sys.stdout): # pylint: disable=redefined-builtin print('\n'.join(version.versions_report()), file=file) self.exit(salt.defaults.exitcodes.EX_OK) def exit(self, status=0, msg=None): # Run the functions on self._mixin_after_parsed_funcs for mixin_before_exit_func in self._mixin_before_exit_funcs: # pylint: disable=no-member try: mixin_before_exit_func(self) except Exception as err: # pylint: disable=broad-except logger.exception(err) logger.error('Error while processing %s: %s', six.text_type(mixin_before_exit_func), traceback.format_exc(err)) if self._setup_mp_logging_listener_ is True: # Stop logging through the queue log.shutdown_multiprocessing_logging() # Stop the logging queue listener process log.shutdown_multiprocessing_logging_listener(daemonizing=True) if isinstance(msg, six.string_types) and msg and msg[-1] != '\n': msg = '{0}\n'.format(msg) optparse.OptionParser.exit(self, status, msg)
saltstack/salt
salt/utils/parsers.py
DaemonMixIn.check_running
python
def check_running(self): ''' Check if a pid file exists and if it is associated with a running process. ''' if self.check_pidfile(): pid = self.get_pidfile() if not salt.utils.platform.is_windows(): if self.check_pidfile() and self.is_daemonized(pid) and os.getppid() != pid: return True else: # We have no os.getppid() on Windows. Use salt.utils.win_functions.get_parent_pid if self.check_pidfile() and self.is_daemonized(pid) and salt.utils.win_functions.get_parent_pid() != pid: return True return False
Check if a pid file exists and if it is associated with a running process.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/parsers.py#L1045-L1060
[ "def check_pidfile(self):\n '''\n Report whether a pidfile exists\n '''\n from salt.utils.process import check_pidfile\n return check_pidfile(self.config['pidfile'])\n" ]
class DaemonMixIn(six.with_metaclass(MixInMeta, object)): _mixin_prio_ = 30 def _mixin_setup(self): self.add_option( '-d', '--daemon', default=False, action='store_true', help='Run the {0} as a daemon.'.format(self.get_prog_name()) ) self.add_option( '--pid-file', dest='pidfile', default=os.path.join( syspaths.PIDFILE_DIR, '{0}.pid'.format(self.get_prog_name()) ), help="Specify the location of the pidfile. Default: '%default'." ) def _mixin_before_exit(self): if hasattr(self, 'config') and self.config.get('pidfile'): # We've loaded and merged options into the configuration, it's safe # to query about the pidfile if self.check_pidfile(): try: os.unlink(self.config['pidfile']) except OSError as err: # Log error only when running salt-master as a root user. # Otherwise this can be ignored, since salt-master is able to # overwrite the PIDfile on the next start. err_msg = ('PIDfile could not be deleted: %s', six.text_type(self.config['pidfile'])) if salt.utils.platform.is_windows(): user = salt.utils.win_functions.get_current_user() if salt.utils.win_functions.is_admin(user): logger.info(*err_msg) logger.debug(six.text_type(err)) else: if not os.getuid(): logger.info(*err_msg) logger.debug(six.text_type(err)) def set_pidfile(self): from salt.utils.process import set_pidfile set_pidfile(self.config['pidfile'], self.config['user']) def check_pidfile(self): ''' Report whether a pidfile exists ''' from salt.utils.process import check_pidfile return check_pidfile(self.config['pidfile']) def get_pidfile(self): ''' Return a pid contained in a pidfile ''' from salt.utils.process import get_pidfile return get_pidfile(self.config['pidfile']) def daemonize_if_required(self): if self.options.daemon: if self._setup_mp_logging_listener_ is True: # Stop the logging queue listener for the current process # We'll restart it once forked log.shutdown_multiprocessing_logging_listener(daemonizing=True) # Late import so logging works correctly salt.utils.process.daemonize() # Setup the 
multiprocessing log queue listener if enabled self._setup_mp_logging_listener() def is_daemonized(self, pid): from salt.utils.process import os_is_running return os_is_running(pid) # Common methods for scripts which can daemonize def _install_signal_handlers(self): signal.signal(signal.SIGTERM, self._handle_signals) signal.signal(signal.SIGINT, self._handle_signals) def prepare(self): self.parse_args() def start(self): self.prepare() self._install_signal_handlers() def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument msg = self.__class__.__name__ if signum == signal.SIGINT: msg += ' received a SIGINT.' elif signum == signal.SIGTERM: msg += ' received a SIGTERM.' logging.getLogger(__name__).warning('%s Exiting.', msg) self.shutdown(exitmsg='{0} Exited.'.format(msg)) def shutdown(self, exitcode=0, exitmsg=None): self.exit(exitcode, exitmsg)
saltstack/salt
salt/utils/parsers.py
SaltSupportOptionParser.find_existing_configs
python
def find_existing_configs(self, default): ''' Find configuration files on the system. :return: ''' configs = [] for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']: if not cfg: continue config_path = self.get_config_file_path(cfg) if os.path.exists(config_path): configs.append(cfg) if default and default not in configs: raise SystemExit('Unknown configuration unit: {}'.format(default)) return configs
Find configuration files on the system. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/parsers.py#L1950-L1966
null
class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser, ConfigDirMixIn, MergeConfigMixIn, LogLevelMixIn, TimeoutMixIn)): default_timeout = 5 description = 'Salt Support is a program to collect all support data: logs, system configuration etc.' usage = '%prog [options] \'<target>\' <function> [arguments]' # ConfigDirMixIn config filename attribute _config_filename_ = 'master' # LogLevelMixIn attributes _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level'] _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file'] def _mixin_setup(self): self.add_option('-P', '--show-profiles', default=False, action='store_true', dest='support_profile_list', help='Show available profiles') self.add_option('-p', '--profile', default='', dest='support_profile', help='Specify support profile or comma-separated profiles, e.g.: "salt,network"') support_archive = '{t}/{h}-support.tar.bz2'.format(t=tempfile.gettempdir(), h=salt.utils.network.get_fqhostname()) self.add_option('-a', '--archive', default=support_archive, dest='support_archive', help=('Specify name of the resulting support archive. ' 'Default is "{f}".'.format(f=support_archive))) self.add_option('-u', '--unit', default='', dest='support_unit', help='Specify examined unit (default "master").') self.add_option('-U', '--show-units', default=False, action='store_true', dest='support_show_units', help='Show available units') self.add_option('-f', '--force', default=False, action='store_true', dest='support_archive_force_overwrite', help='Force overwrite existing archive, if exists') self.add_option('-o', '--out', default='null', dest='support_output_format', help=('Set the default output using the specified outputter, ' 'unless profile does not overrides this. Default: "yaml".')) def setup_config(self, cfg=None): ''' Open suitable config file. 
:return: ''' _opts, _args = optparse.OptionParser.parse_args(self) configs = self.find_existing_configs(_opts.support_unit) if configs and cfg not in configs: cfg = configs[0] return config.master_config(self.get_config_file_path(cfg))
saltstack/salt
salt/utils/parsers.py
SaltSupportOptionParser.setup_config
python
def setup_config(self, cfg=None): ''' Open suitable config file. :return: ''' _opts, _args = optparse.OptionParser.parse_args(self) configs = self.find_existing_configs(_opts.support_unit) if configs and cfg not in configs: cfg = configs[0] return config.master_config(self.get_config_file_path(cfg))
Open suitable config file. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/parsers.py#L1968-L1978
[ "def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None, exit_on_config_errors=False):\n '''\n Reads in the master configuration file and sets up default options\n\n This is useful for running the actual master daemon. For running\n Master-side client interfaces that need the master opts see\n :py:func:`salt.client.client_config`.\n '''\n if defaults is None:\n defaults = DEFAULT_MASTER_OPTS.copy()\n\n if not os.environ.get(env_var, None):\n # No valid setting was given using the configuration variable.\n # Lets see is SALT_CONFIG_DIR is of any use\n salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)\n if salt_config_dir:\n env_config_file_path = os.path.join(salt_config_dir, 'master')\n if salt_config_dir and os.path.isfile(env_config_file_path):\n # We can get a configuration file using SALT_CONFIG_DIR, let's\n # update the environment with this information\n os.environ[env_var] = env_config_file_path\n\n overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file'])\n default_include = overrides.get('default_include',\n defaults['default_include'])\n include = overrides.get('include', [])\n\n overrides.update(include_config(default_include, path, verbose=False,\n exit_on_config_errors=exit_on_config_errors))\n overrides.update(include_config(include, path, verbose=True,\n exit_on_config_errors=exit_on_config_errors))\n opts = apply_master_config(overrides, defaults)\n _validate_ssh_minion_opts(opts)\n _validate_opts(opts)\n # If 'nodegroups:' is uncommented in the master config file, and there are\n # no nodegroups defined, opts['nodegroups'] will be None. 
Fix this by\n # reverting this value to the default, as if 'nodegroups:' was commented\n # out or not present.\n if opts.get('nodegroups') is None:\n opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})\n if salt.utils.data.is_dictlist(opts['nodegroups']):\n opts['nodegroups'] = salt.utils.data.repack_dictlist(opts['nodegroups'])\n apply_sdb(opts)\n return opts\n", "def find_existing_configs(self, default):\n '''\n Find configuration files on the system.\n :return:\n '''\n configs = []\n for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']:\n if not cfg:\n continue\n config_path = self.get_config_file_path(cfg)\n if os.path.exists(config_path):\n configs.append(cfg)\n\n if default and default not in configs:\n raise SystemExit('Unknown configuration unit: {}'.format(default))\n\n return configs\n" ]
class SaltSupportOptionParser(six.with_metaclass(OptionParserMeta, OptionParser, ConfigDirMixIn, MergeConfigMixIn, LogLevelMixIn, TimeoutMixIn)): default_timeout = 5 description = 'Salt Support is a program to collect all support data: logs, system configuration etc.' usage = '%prog [options] \'<target>\' <function> [arguments]' # ConfigDirMixIn config filename attribute _config_filename_ = 'master' # LogLevelMixIn attributes _default_logging_level_ = config.DEFAULT_MASTER_OPTS['log_level'] _default_logging_logfile_ = config.DEFAULT_MASTER_OPTS['log_file'] def _mixin_setup(self): self.add_option('-P', '--show-profiles', default=False, action='store_true', dest='support_profile_list', help='Show available profiles') self.add_option('-p', '--profile', default='', dest='support_profile', help='Specify support profile or comma-separated profiles, e.g.: "salt,network"') support_archive = '{t}/{h}-support.tar.bz2'.format(t=tempfile.gettempdir(), h=salt.utils.network.get_fqhostname()) self.add_option('-a', '--archive', default=support_archive, dest='support_archive', help=('Specify name of the resulting support archive. ' 'Default is "{f}".'.format(f=support_archive))) self.add_option('-u', '--unit', default='', dest='support_unit', help='Specify examined unit (default "master").') self.add_option('-U', '--show-units', default=False, action='store_true', dest='support_show_units', help='Show available units') self.add_option('-f', '--force', default=False, action='store_true', dest='support_archive_force_overwrite', help='Force overwrite existing archive, if exists') self.add_option('-o', '--out', default='null', dest='support_output_format', help=('Set the default output using the specified outputter, ' 'unless profile does not overrides this. Default: "yaml".')) def find_existing_configs(self, default): ''' Find configuration files on the system. 
:return: ''' configs = [] for cfg in [default, self._config_filename_, 'minion', 'proxy', 'cloud', 'spm']: if not cfg: continue config_path = self.get_config_file_path(cfg) if os.path.exists(config_path): configs.append(cfg) if default and default not in configs: raise SystemExit('Unknown configuration unit: {}'.format(default)) return configs
saltstack/salt
salt/states/ethtool.py
coalesce
python
def coalesce(name, **kwargs):
    '''
    Manage coalescing settings of network device

    name
        Interface name to apply coalescing settings

    .. code-block:: yaml

        eth0:
          ethtool.coalesce:
            - name: eth0
            - adaptive_rx: on
            - adaptive_tx: on
            - rx_usecs: 24
            - tx_usecs: 48
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'Network device {0} coalescing settings are up to date.'.format(name),
    }

    # Honour an explicit test=... kwarg, otherwise fall back to the global
    # test mode.
    kwargs.setdefault('test', __opts__.get('test', False))

    try:
        current = __salt__['ethtool.show_coalesce'](name)
        if not isinstance(current, dict):
            ret['result'] = False
            ret['comment'] = 'Device {0} coalescing settings are not supported'.format(name)
            return ret

        # Keep only the requested settings that differ from the live values.
        pending = {key: value for key, value in kwargs.items()
                   if key in current and value != current[key]}
        diff = ['{0}: {1}'.format(key, value) for key, value in kwargs.items()
                if key in current and value != current[key]]

        # Dry run: report what would change, apply nothing.
        if kwargs['test']:
            if pending:
                ret['result'] = None
                ret['comment'] = ('Device {0} coalescing settings are set to be '
                                  'updated:\n{1}'.format(name, '\n'.join(diff)))
            return ret

        if pending:
            ret['comment'] = 'Device {0} coalescing settings updated.'.format(name)
            ret['changes']['ethtool_coalesce'] = '\n'.join(diff)
    except AttributeError as error:
        ret['result'] = False
        ret['comment'] = six.text_type(error)
        return ret

    # Apply coalescing settings
    if pending:
        try:
            __salt__['ethtool.set_coalesce'](name, **pending)
        except AttributeError as error:
            ret['result'] = False
            ret['comment'] = six.text_type(error)

    return ret
Manage coalescing settings of network device name Interface name to apply coalescing settings .. code-block:: yaml eth0: ethtool.coalesce: - name: eth0 - adaptive_rx: on - adaptive_tx: on - rx_usecs: 24 - rx_frame: 0 - rx_usecs_irq: 0 - rx_frames_irq: 0 - tx_usecs: 48 - tx_frames: 0 - tx_usecs_irq: 0 - tx_frames_irq: 0 - stats_block_usecs: 0 - pkt_rate_low: 0 - rx_usecs_low: 0 - rx_frames_low: 0 - tx_usecs_low: 0 - tx_frames_low: 0 - pkt_rate_high: 0 - rx_usecs_high: 0 - rx_frames_high: 0 - tx_usecs_high: 0 - tx_frames_high: 0 - sample_interval: 0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ethtool.py#L51-L144
null
# -*- coding: utf-8 -*- ''' Configuration of network device .. versionadded:: 2016.11.0 :codeauthor: Krzysztof Pawlowski <msciciel@msciciel.eu> :maturity: new :depends: python-ethtool :platform: linux .. code-block:: yaml eth0: ethtool.coalesce: - name: eth0 - rx_usecs: 24 - tx_usecs: 48 eth0: ethtool.ring: - name: eth0 - rx: 1024 - tx: 1024 eth0: ethtool.offload: - name: eth0 - tcp_segmentation_offload: on ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt libs from salt.ext import six # Set up logging log = logging.getLogger(__name__) def __virtual__(): ''' Provide ethtool state ''' return 'ethtool' if 'ethtool.show_driver' in __salt__ else False def ring(name, **kwargs): ''' Manage rx/tx ring parameters of network device Use 'max' word to set with factory maximum name Interface name to apply ring parameters .. code-block:: yaml eth0: ethtool.ring: - name: eth0 - rx: 1024 - rx_mini: 0 - rx_jumbo: 0 - tx: max ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Network device {0} ring parameters are up to date.'.format(name), } apply_ring = False if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # Build ring parameters try: old = __salt__['ethtool.show_ring'](name) if not isinstance(old, dict): ret['result'] = False ret['comment'] = 'Device {0} ring parameters are not supported'.format(name) return ret new = {} diff = [] # Retreive changes to made for key, value in kwargs.items(): if key in old: if value == 'max': value = old['{0}_max'.format(key)] if value != old[key]: new.update({key: value}) diff.append('{0}: {1}'.format(key, value)) # Dry run if kwargs['test']: if not new: return ret if new: ret['result'] = None ret['comment'] = 'Device {0} ring parameters are set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) return ret # Prepare return output if new: apply_ring = True ret['comment'] = 'Device {0} ring parameters updated.'.format(name) 
ret['changes']['ethtool_ring'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply ring parameters if apply_ring: try: __salt__['ethtool.set_ring'](name, **new) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret def offload(name, **kwargs): ''' Manage protocol offload and other features of network device name Interface name to apply coalescing settings .. code-block:: yaml eth0: ethtool.offload: - name: eth0 - tcp_segmentation_offload: on ''' ret = { 'name': name, 'changes': {}, 'result': True, 'comment': 'Network device {0} offload settings are up to date.'.format(name), } apply_offload = False if 'test' not in kwargs: kwargs['test'] = __opts__.get('test', False) # Build offload settings try: old = __salt__['ethtool.show_offload'](name) if not isinstance(old, dict): ret['result'] = False ret['comment'] = 'Device {0} offload settings are not supported'.format(name) return ret new = {} diff = [] # Retreive changes to made for key, value in kwargs.items(): value = value and "on" or "off" if key in old and value != old[key]: new.update({key: value}) diff.append('{0}: {1}'.format(key, value)) # Dry run if kwargs['test']: if not new: return ret if new: ret['result'] = None ret['comment'] = 'Device {0} offload settings are set to be ' \ 'updated:\n{1}'.format(name, '\n'.join(diff)) return ret # Prepare return output if new: apply_offload = True ret['comment'] = 'Device {0} offload settings updated.'.format(name) ret['changes']['ethtool_offload'] = '\n'.join(diff) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret # Apply offload settings if apply_offload: try: __salt__['ethtool.set_offload'](name, **new) except AttributeError as error: ret['result'] = False ret['comment'] = six.text_type(error) return ret return ret
saltstack/salt
salt/states/esxdatacenter.py
datacenter_configured
python
def datacenter_configured(name):
    '''
    Makes sure a datacenter exists.

    If the state is run by an ``esxdatacenter`` minion, the name of the
    datacenter is retrieved from the proxy details, otherwise the datacenter
    has the same name as the state.

    Supported proxies: esxdatacenter

    name:
        Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
    '''
    # Resolve the effective datacenter name from the proxy, if applicable.
    proxy_type = __salt__['vsphere.get_proxy_type']()
    if proxy_type == 'esxdatacenter':
        dc_name = __salt__['esxdatacenter.get_details']()['datacenter']
    else:
        dc_name = name
    log.info('Running datacenter_configured for datacenter \'%s\'', dc_name)
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': 'Default'}
    comments = []
    si = None
    try:
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        existing = __salt__['vsphere.list_datacenters_via_proxy'](
            datacenter_names=[dc_name], service_instance=si)
        if existing:
            comments.append('Datacenter \'{0}\' already exists. Nothing to be '
                            'done.'.format(dc_name))
            log.info(comments[-1])
        elif __opts__['test']:
            comments.append('State will create '
                            'datacenter \'{0}\'.'.format(dc_name))
        else:
            log.debug('Creating datacenter \'%s\'', dc_name)
            __salt__['vsphere.create_datacenter'](dc_name, si)
            comments.append('Created datacenter \'{0}\'.'.format(dc_name))
            log.info(comments[-1])
            ret['changes'].update({'new': {'name': dc_name}})
        __salt__['vsphere.disconnect'](si)
        ret['comment'] = '\n'.join(comments)
        ret['result'] = None if __opts__['test'] and ret['changes'] else True
        return ret
    except salt.exceptions.CommandExecutionError as exc:
        log.error('Error: %s', exc)
        # Close the connection before reporting the failure.
        if si:
            __salt__['vsphere.disconnect'](si)
        ret.update({'result': None if __opts__['test'] else False,
                    'comment': six.text_type(exc)})
        return ret
Makes sure a datacenter exists. If the state is run by an ``esxdatacenter`` minion, the name of the datacenter is retrieved from the proxy details, otherwise the datacenter has the same name as the state. Supported proxies: esxdatacenter name: Datacenter name. Ignored if the proxytype is ``esxdatacenter``.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/esxdatacenter.py#L72-L126
null
# -*- coding: utf-8 -*- ''' Salt states to create and manage VMware vSphere datacenters (datacenters). :codeauthor: :email:`Alexandru Bleotu <alexandru.bleotu@morganstaley.com>` Dependencies ============ - pyVmomi Python Module States ====== datacenter_configured --------------------- Makes sure a datacenter exists and is correctly configured. If the state is run by an ``esxdatacenter`` minion, the name of the datacenter is retrieved from the proxy details, otherwise the datacenter has the same name as the state. Supported proxies: esxdatacenter Example: 1. Make sure that a datacenter named ``target_dc`` exists on the vCenter, using a ``esxdatacenter`` proxy: Proxy minion configuration (connects passthrough to the vCenter): .. code-block:: yaml proxy: proxytype: esxdatacenter datacenter: target_dc vcenter: vcenter.fake.com mechanism: sspi domain: fake.com principal: host State configuration: .. code-block:: yaml datacenter_state: esxdatacenter.datacenter_configured ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging # Import Salt Libs from salt.ext import six import salt.exceptions # Get Logging Started log = logging.getLogger(__name__) LOGIN_DETAILS = {} def __virtual__(): return 'esxdatacenter' def mod_init(low): return True
saltstack/salt
salt/modules/napalm_formula.py
_container_path
python
def _container_path(model,
                    key=None,
                    container=None,
                    delim=DEFAULT_TARGET_DELIM):
    '''
    Generate all the possible paths within an OpenConfig-like object,
    down to (and including) the terminating ``container`` key.
    This function returns a generator.

    model
        The nested dictionary to walk.  Every value is expected to be a
        dictionary until the ``container`` key is reached.

    key
        The delimited path accumulated so far (internal, used by the
        recursion).

    container
        The key name that terminates a path.  Defaults to ``config``.

    delim
        The delimiter joining the path components.
    '''
    if not key:
        key = ''
    if not container:
        container = 'config'
    # dict.items() works on both Python 2 and 3; no need for six.iteritems.
    for model_key, model_value in model.items():
        # Extend the delimited path with the current key.
        if key:
            key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key,
                                                            delim=delim,
                                                            cur_key=model_key)
        else:
            key_depth = model_key
        if model_key == container:
            yield key_depth
        else:
            # NOTE(review): assumes model_value is a dict; a non-dict leaf
            # that is not the container key would raise here -- same as the
            # previous implementation.
            for value in _container_path(model_value,
                                         key=key_depth,
                                         container=container,
                                         delim=delim):
                yield value
Generate all the possible paths within an OpenConfig-like object. This function returns a generator.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L42-L68
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _container_path(model,\n key=None,\n container=None,\n delim=DEFAULT_TARGET_DELIM):\n '''\n Generate all the possible paths within an OpenConfig-like object.\n This function returns a generator.\n '''\n if not key:\n key = ''\n if not container:\n container = 'config'\n for model_key, model_value in six.iteritems(model):\n if key:\n key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key,\n delim=delim,\n cur_key=model_key)\n else:\n key_depth = model_key\n if model_key == container:\n yield key_depth\n else:\n for value in _container_path(model_value,\n key=key_depth,\n container=container,\n delim=delim):\n yield value\n" ]
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. 
code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. 
The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. ''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. 
''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. 
code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. 
code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
setval
python
def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM):
    '''
    Set a value under the dictionary hierarchy identified under the key.
    The target 'foo/bar/baz' returns the dictionary hierarchy
    {'foo': {'bar': {'baz': {}}}}.

    .. note::
        Currently this doesn't work with integers, i.e. cannot build lists
        dynamically.

    CLI Example:

    .. code-block:: bash

        salt '*' formula.setval foo:baz:bar True
    '''
    if not dict_:
        dict_ = {}
    parts = key.split(delim)
    # Walk down the hierarchy, creating empty dicts for missing levels.
    node = dict_
    for part in parts[:-1]:
        node = node.setdefault(part, {})
    # Deep-copy the value so the stored hierarchy never aliases the caller's
    # object.
    node[parts[-1]] = copy.deepcopy(val)
    return dict_
Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L107-L133
null
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. 
The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. ''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. 
''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. 
code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. 
code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
traverse
python
def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):
    '''
    Traverse a dict or list using a colon-delimited (or otherwise delimited,
    using the ``delimiter`` param) target string.

    The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this
    value exists, and will otherwise return the object in the ``default``
    argument. The function automatically determines the target type: for
    ``{'foo': {'bar': ['baz']}}`` the list is indexed numerically, while for
    ``{'foo': {'bar': {'0': 'baz'}}}`` the string key ``'0'`` is used.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar
    '''
    # Delegate the whole lookup to the Salt utility helper.
    found = _traverse_dict_and_list(data, key, default=default, delimiter=delimiter)
    return found
Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L136-L153
null
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. 
''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. 
''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. 
code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. 
code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
dictupdate
python
def dictupdate(dest, upd, recursive_update=True, merge_lists=False):
    '''
    Recursive version of the default ``dict.update``.

    Merges ``upd`` recursively into ``dest``.

    If ``recursive_update=False``, will use the classic ``dict.update``, or
    fall back on a manual merge (helpful for non-dict types like
    ``FunctionWrapper``).

    If ``merge_lists=True``, list values are aggregated instead of replaced:
    the list in ``upd`` is appended to the list in ``dest``, producing
    ``dest[key] + upd[key]``. This behaviour only takes effect when
    ``recursive_update=True``. By default ``merge_lists=False``.
    '''
    # Thin pass-through to the shared Salt implementation.
    return salt.utils.dictupdate.update(
        dest,
        upd,
        recursive_update=recursive_update,
        merge_lists=merge_lists,
    )
Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L156-L170
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n" ]
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. 
code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. 
''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. 
code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. 
code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
defaults
python
def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. 
''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model)
Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L173-L234
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM):\n '''\n Set a value under the dictionary hierarchy identified\n under the key. The target 'foo/bar/baz' returns the\n dictionary hierarchy {'foo': {'bar': {'baz': {}}}}.\n\n .. 
note::\n\n Currently this doesn't work with integers, i.e.\n cannot build lists dynamically.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' formula.setval foo:baz:bar True\n '''\n if not dict_:\n dict_ = {}\n prev_hier = dict_\n dict_hier = key.split(delim)\n for each in dict_hier[:-1]:\n if each not in prev_hier:\n prev_hier[each] = {}\n prev_hier = prev_hier[each]\n prev_hier[dict_hier[-1]] = copy.deepcopy(val)\n return dict_\n", "def _container_path(model,\n key=None,\n container=None,\n delim=DEFAULT_TARGET_DELIM):\n '''\n Generate all the possible paths within an OpenConfig-like object.\n This function returns a generator.\n '''\n if not key:\n key = ''\n if not container:\n container = 'config'\n for model_key, model_value in six.iteritems(model):\n if key:\n key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key,\n delim=delim,\n cur_key=model_key)\n else:\n key_depth = model_key\n if model_key == container:\n yield key_depth\n else:\n for value in _container_path(model_value,\n key=key_depth,\n container=container,\n delim=delim):\n yield value\n" ]
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. 
code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. ''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. 
code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. 
code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
render_field
python
def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. 
code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append)
Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description";
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L237-L314
[ "def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM):\n '''\n Traverse a dict or list using a colon-delimited (or otherwise delimited,\n using the ``delimiter`` param) target string. The target ``foo:bar:0`` will\n return ``data['foo']['bar'][0]`` if this value exists, and will otherwise\n return the dict in the default argument.\n Function will automatically determine the target type.\n The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like\n ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}``\n then ``return data['foo']['bar']['0']``\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' napalm_formula.traverse \"{'foo': {'bar': {'baz': True}}}\" foo:baz:bar\n '''\n return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter)\n" ]
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. 
code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. ''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. 
code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. ''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. 
code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
saltstack/salt
salt/modules/napalm_formula.py
render_fields
python
def render_fields(dictionary, *fields, **opts): ''' This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. code-block:: text mtu "68" description "Interface description" ''' results = [] for field in fields: res = render_field(dictionary, field, **opts) if res: results.append(res) if 'indent' not in opts: opts['indent'] = 0 if 'separator' not in opts: opts['separator'] = '\n{ind}'.format(ind=' '*opts['indent']) return opts['separator'].join(results)
This function works similarly to :mod:`render_field <salt.modules.napalm_formula.render_field>` but for a list of fields from the same dictionary, rendering, indenting and distributing them on separate lines. dictionary The dictionary to traverse. fields A list of field names or paths in the dictionary. indent: ``0`` The indentation to use, prepended to the rendered field. separator: ``\\n`` The separator to use between fields. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_fields "{'mtu': 68, 'description': 'Interface description'}" mtu description Jinja usage example: .. code-block:: jinja {%- set config={'mtu': 68, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_fields(config, 'mtu', 'description', quotes=True) }} The Jinja example above would generate the following configuration: .. code-block:: text mtu "68" description "Interface description"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_formula.py#L317-L367
[ "def render_field(dictionary,\n field,\n prepend=None,\n append=None,\n quotes=False,\n **opts):\n '''\n Render a field found under the ``field`` level of the hierarchy in the\n ``dictionary`` object.\n This is useful to render a field in a Jinja template without worrying that\n the hierarchy might not exist. For example if we do the following in Jinja:\n ``{{ interfaces.interface.Ethernet5.config.description }}`` for the\n following object:\n ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}``\n it would error, as the ``Ethernet5`` key does not exist.\n With this helper, we can skip this and avoid existence checks. This must be\n however used with care.\n\n dictionary\n The dictionary to traverse.\n\n field\n The key name or part to traverse in the ``dictionary``.\n\n prepend: ``None``\n The text to prepend in front of the text. Usually, we need to have the\n name of the field too when generating the configuration.\n\n append: ``None``\n Text to append at the end.\n\n quotes: ``False``\n Whether should wrap the text around quotes.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' napalm_formula.render_field \"{'enabled': True}\" enabled\n # This would return the value of the ``enabled`` leaf key\n salt '*' napalm_formula.render_field \"{'enabled': True}\" description\n # This would not error\n\n Jinja usage example:\n\n .. code-block:: jinja\n\n {%- set config = {'enabled': True, 'description': 'Interface description'} %}\n {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }}\n\n The example above would be rendered on Arista / Cisco as:\n\n .. code-block:: text\n\n description \"Interface description\"\n\n While on Junos (the semicolon is important to be added, otherwise the\n configuration won't be accepted by Junos):\n\n .. 
code-block:: text\n\n description \"Interface description\";\n '''\n value = traverse(dictionary, field)\n if value is None:\n return ''\n if prepend is None:\n prepend = field.replace('_', '-')\n if append is None:\n if __grains__['os'] in ('junos',):\n append = ';'\n else:\n append = ''\n if quotes:\n value = '\"{value}\"'.format(value=value)\n return '{prepend} {value}{append}'.format(prepend=prepend,\n value=value,\n append=append)\n" ]
# -*- coding: utf-8 -*- ''' NAPALM Formula helpers ====================== .. versionadded:: 2019.2.0 This is an Execution Module providing helpers for various NAPALM formulas, e.g., napalm-interfaces-formula, napalm-bgp-formula, napalm-ntp-formula etc., meant to provide various helper functions to make the templates more readable. ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import copy import logging import fnmatch # Import salt modules import salt.utils.napalm import salt.ext.six as six import salt.utils.dictupdate from salt.defaults import DEFAULT_TARGET_DELIM try: from salt.utils.data import traverse_dict_and_list as _traverse_dict_and_list except ImportError: from salt.utils import traverse_dict_and_list as _traverse_dict_and_list __proxyenabled__ = ['*'] __virtualname__ = 'napalm_formula' log = logging.getLogger(__name__) def __virtual__(): ''' Available only on NAPALM Minions. ''' return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__) def _container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Generate all the possible paths within an OpenConfig-like object. This function returns a generator. ''' if not key: key = '' if not container: container = 'config' for model_key, model_value in six.iteritems(model): if key: key_depth = '{prev_key}{delim}{cur_key}'.format(prev_key=key, delim=delim, cur_key=model_key) else: key_depth = model_key if model_key == container: yield key_depth else: for value in _container_path(model_value, key=key_depth, container=container, delim=delim): yield value def container_path(model, key=None, container=None, delim=DEFAULT_TARGET_DELIM): ''' Return the list of all the possible paths in a container, down to the ``config`` container. This function can be used to verify that the ``model`` is a Python object correctly structured and respecting the OpenConfig hierarchy. model The OpenConfig-structured object to inspect. 
delim: ``:`` The key delimiter. In particular cases, it is indicated to use ``//`` as ``:`` might be already used in various cases, e.g., IPv6 addresses, interface name (e.g., Juniper QFX series), etc. CLI Example: .. code-block:: bash salt '*' napalm_formula.container_path "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" The example above would return a list with the following element: ``interfaces:interface:Ethernet1:config`` which is the only possible path in that hierarchy. Other output examples: .. code-block:: text - interfaces:interface:Ethernet1:config - interfaces:interface:Ethernet1:subinterfaces:subinterface:0:config - interfaces:interface:Ethernet2:config ''' return list(_container_path(model)) def setval(key, val, dict_=None, delim=DEFAULT_TARGET_DELIM): ''' Set a value under the dictionary hierarchy identified under the key. The target 'foo/bar/baz' returns the dictionary hierarchy {'foo': {'bar': {'baz': {}}}}. .. note:: Currently this doesn't work with integers, i.e. cannot build lists dynamically. CLI Example: .. code-block:: bash salt '*' formula.setval foo:baz:bar True ''' if not dict_: dict_ = {} prev_hier = dict_ dict_hier = key.split(delim) for each in dict_hier[:-1]: if each not in prev_hier: prev_hier[each] = {} prev_hier = prev_hier[each] prev_hier[dict_hier[-1]] = copy.deepcopy(val) return dict_ def traverse(data, key, default=None, delimiter=DEFAULT_TARGET_DELIM): ''' Traverse a dict or list using a colon-delimited (or otherwise delimited, using the ``delimiter`` param) target string. The target ``foo:bar:0`` will return ``data['foo']['bar'][0]`` if this value exists, and will otherwise return the dict in the default argument. Function will automatically determine the target type. The target ``foo:bar:0`` will return data['foo']['bar'][0] if data like ``{'foo':{'bar':['baz']}}`` , if data like ``{'foo':{'bar':{'0':'baz'}}}`` then ``return data['foo']['bar']['0']`` CLI Example: .. 
code-block:: bash salt '*' napalm_formula.traverse "{'foo': {'bar': {'baz': True}}}" foo:baz:bar ''' return _traverse_dict_and_list(data, key, default=default, delimiter=delimiter) def dictupdate(dest, upd, recursive_update=True, merge_lists=False): ''' Recursive version of the default dict.update Merges upd recursively into dest If recursive_update=False, will use the classic dict.update, or fall back on a manual merge (helpful for non-dict types like ``FunctionWrapper``). If ``merge_lists=True``, will aggregate list object types instead of replace. The list in ``upd`` is added to the list in ``dest``, so the resulting list is ``dest[key] + upd[key]``. This behaviour is only activated when ``recursive_update=True``. By default ``merge_lists=False``. ''' return salt.utils.dictupdate.update(dest, upd, recursive_update=recursive_update, merge_lists=merge_lists) def defaults(model, defaults_, delim='//', flipped_merge=False): ''' Apply the defaults to a Python dictionary having the structure as described in the OpenConfig standards. model The OpenConfig model to apply the defaults to. defaults The dictionary of defaults. This argument must equally be structured with respect to the OpenConfig standards. For ease of use, the keys of these support glob matching, therefore we don't have to provide the defaults for each entity but only for the entity type. See an example below. delim: ``//`` The key delimiter to use. Generally, ``//`` should cover all the possible cases, and you don't need to override this value. flipped_merge: ``False`` Whether should merge the model into the defaults, or the defaults into the model. Default: ``False`` (merge the model into the defaults, i.e., any defaults would be overridden by the values from the ``model``). CLI Example: .. 
code-block:: bash salt '*' napalm_formula.defaults "{'interfaces': {'interface': {'Ethernet1': {'config': {'name': 'Ethernet1'}}}}}" "{'interfaces': {'interface': {'*': {'config': {'enabled': True}}}}}" As one can notice in the example above, the ``*`` corresponds to the interface name, therefore, the defaults will be applied on all the interfaces. ''' merged = {} log.debug('Applying the defaults:') log.debug(defaults_) log.debug('openconfig like dictionary:') log.debug(model) for model_path in _container_path(model, delim=delim): for default_path in _container_path(defaults_, delim=delim): log.debug('Comparing %s to %s', model_path, default_path) if not fnmatch.fnmatch(model_path, default_path) or\ not len(model_path.split(delim)) == len(default_path.split(delim)): continue log.debug('%s matches %s', model_path, default_path) # If there's a match, it will build the dictionary from the top devault_val = _traverse_dict_and_list(defaults_, default_path, delimiter=delim) merged = setval(model_path, devault_val, dict_=merged, delim=delim) log.debug('Complete default dictionary') log.debug(merged) log.debug('Merging with the model') log.debug(model) if flipped_merge: return salt.utils.dictupdate.update(model, merged) return salt.utils.dictupdate.update(merged, model) def render_field(dictionary, field, prepend=None, append=None, quotes=False, **opts): ''' Render a field found under the ``field`` level of the hierarchy in the ``dictionary`` object. This is useful to render a field in a Jinja template without worrying that the hierarchy might not exist. For example if we do the following in Jinja: ``{{ interfaces.interface.Ethernet5.config.description }}`` for the following object: ``{'interfaces': {'interface': {'Ethernet1': {'config': {'enabled': True}}}}}`` it would error, as the ``Ethernet5`` key does not exist. With this helper, we can skip this and avoid existence checks. This must be however used with care. dictionary The dictionary to traverse. 
field The key name or part to traverse in the ``dictionary``. prepend: ``None`` The text to prepend in front of the text. Usually, we need to have the name of the field too when generating the configuration. append: ``None`` Text to append at the end. quotes: ``False`` Whether should wrap the text around quotes. CLI Example: .. code-block:: bash salt '*' napalm_formula.render_field "{'enabled': True}" enabled # This would return the value of the ``enabled`` leaf key salt '*' napalm_formula.render_field "{'enabled': True}" description # This would not error Jinja usage example: .. code-block:: jinja {%- set config = {'enabled': True, 'description': 'Interface description'} %} {{ salt.napalm_formula.render_field(config, 'description', quotes=True) }} The example above would be rendered on Arista / Cisco as: .. code-block:: text description "Interface description" While on Junos (the semicolon is important to be added, otherwise the configuration won't be accepted by Junos): .. code-block:: text description "Interface description"; ''' value = traverse(dictionary, field) if value is None: return '' if prepend is None: prepend = field.replace('_', '-') if append is None: if __grains__['os'] in ('junos',): append = ';' else: append = '' if quotes: value = '"{value}"'.format(value=value) return '{prepend} {value}{append}'.format(prepend=prepend, value=value, append=append)
saltstack/salt
salt/states/win_servermanager.py
installed
python
def installed(name, features=None, recurse=False, restart=False, source=None, exclude=None): ''' Install the windows feature. To install a single feature, use the ``name`` parameter. To install multiple features, use the ``features`` parameter. .. note:: Some features require reboot after un/installation. If so, until the server is restarted other features can not be installed! Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to install. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 recurse (Optional[bool]): Install all sub-features as well. If the feature is installed but one of its sub-features are not installed set this will install additional sub-features source (Optional[str]): Path to the source files if missing from the target system. None means that the system will use windows update services to find the required files. Default is None restart (Optional[bool]): Restarts the computer when installation is complete, if required by the role/feature installed. Default is False exclude (Optional[str]): The name of the feature to exclude when installing the named feature. This can be a single feature, a string of features in a comma-delimited list (no spaces), or a list of features. .. warning:: As there is no exclude option for the ``Add-WindowsFeature`` or ``Install-WindowsFeature`` PowerShell commands the features named in ``exclude`` will be installed with other sub-features and will then be removed. 
**If the feature named in ``exclude`` is not a sub-feature of one of the installed items it will still be removed.** Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Installs the IIS Web Server Role (Web-Server) IIS-WebServerRole: win_servermanager.installed: - recurse: True - name: Web-Server # Install multiple features, exclude the Web-Service install_multiple_features: win_servermanager.installed: - recurse: True - features: - RemoteAccess - XPS-Viewer - SNMP-Service - exclude: - Web-Server ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Check if features is not passed, use name. Split commas if features is None: features = name.split(',') # Make sure features is a list, split commas if not isinstance(features, list): features = features.split(',') # Determine if the feature is installed old = __salt__['win_servermanager.list_installed']() cur_feat = [] for feature in features: if feature not in old: ret['changes'][feature] = \ 'Will be installed recurse={0}'.format(recurse) elif recurse: ret['changes'][feature] = \ 'Already installed but might install sub-features' else: cur_feat.append(feature) if cur_feat: cur_feat.insert(0, 'The following features are already installed:') ret['comment'] = '\n- '.join(cur_feat) if not ret['changes']: return ret if __opts__['test']: ret['result'] = None return ret # Install the features status = __salt__['win_servermanager.install']( features, recurse=recurse, restart=restart, source=source, exclude=exclude) ret['result'] = status['Success'] # Show items failed to install fail_feat = [] new_feat = [] rem_feat = [] for feature in status['Features']: # Features that failed to install or be removed if not status['Features'][feature].get('Success', True): 
fail_feat.append('- {0}'.format(feature)) # Features that installed elif '(exclude)' not in status['Features'][feature]['Message']: new_feat.append('- {0}'.format(feature)) # Show items that were removed because they were part of `exclude` elif '(exclude)' in status['Features'][feature]['Message']: rem_feat.append('- {0}'.format(feature)) if fail_feat: fail_feat.insert(0, 'Failed to install the following:') if new_feat: new_feat.insert(0, 'Installed the following:') if rem_feat: rem_feat.insert(0, 'Removed the following (exclude):') ret['comment'] = '\n'.join(fail_feat + new_feat + rem_feat) # Get the changes new = __salt__['win_servermanager.list_installed']() ret['changes'] = salt.utils.data.compare_dicts(old, new) return ret
Install the windows feature. To install a single feature, use the ``name`` parameter. To install multiple features, use the ``features`` parameter. .. note:: Some features require reboot after un/installation. If so, until the server is restarted other features can not be installed! Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to install. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 recurse (Optional[bool]): Install all sub-features as well. If the feature is installed but one of its sub-features are not installed set this will install additional sub-features source (Optional[str]): Path to the source files if missing from the target system. None means that the system will use windows update services to find the required files. Default is None restart (Optional[bool]): Restarts the computer when installation is complete, if required by the role/feature installed. Default is False exclude (Optional[str]): The name of the feature to exclude when installing the named feature. This can be a single feature, a string of features in a comma-delimited list (no spaces), or a list of features. .. warning:: As there is no exclude option for the ``Add-WindowsFeature`` or ``Install-WindowsFeature`` PowerShell commands the features named in ``exclude`` will be installed with other sub-features and will then be removed. **If the feature named in ``exclude`` is not a sub-feature of one of the installed items it will still be removed.** Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. 
code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Installs the IIS Web Server Role (Web-Server) IIS-WebServerRole: win_servermanager.installed: - recurse: True - name: Web-Server # Install multiple features, exclude the Web-Service install_multiple_features: win_servermanager.installed: - recurse: True - features: - RemoteAccess - XPS-Viewer - SNMP-Service - exclude: - Web-Server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_servermanager.py#L25-L189
null
# -*- coding: utf-8 -*- ''' Manage Windows features via the ServerManager powershell module. Can install and remove roles/features. :maintainer: Shane Lee <slee@saltstack.com> :platform: Windows Server 2008R2 or greater :depends: win_servermanager.install :depends: win_servermanager.remove ''' from __future__ import absolute_import, unicode_literals, print_function # Import salt modules import salt.utils.data import salt.utils.versions def __virtual__(): ''' Load only if win_servermanager is loaded ''' return 'win_servermanager' if 'win_servermanager.install' in __salt__ else False def removed(name, features=None, remove_payload=False, restart=False): ''' Remove the windows feature To remove a single feature, use the ``name`` parameter. To remove multiple features, use the ``features`` parameter. Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma-delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to remove. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 remove_payload (Optional[bool]): True will cause the feature to be removed from the side-by-side store. To install the feature in the future you will need to specify the ``source`` restart (Optional[bool]): Restarts the computer when uninstall is complete if required by the role/feature uninstall. Default is False .. note:: Some features require a reboot after uninstall. If so the feature will not be completely uninstalled until the server is restarted. Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. 
code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Uninstall the IIS Web Server Rol (Web-Server) IIS-WebserverRole: win_servermanager.removed: - name: Web-Server # Uninstall multiple features, reboot if required uninstall_multiple_features: win_servermanager.removed: - features: - RemoteAccess - XPX-Viewer - SNMP-Service - restart: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Check if features is not passed, use name. Split commas if features is None: features = name.split(',') # Make sure features is a list, split commas if not isinstance(features, list): features = features.split(',') # Determine if the feature is installed old = __salt__['win_servermanager.list_installed']() rem_feat = [] for feature in features: if feature in old: ret['changes'][feature] = 'Will be removed' else: rem_feat.append(feature) if rem_feat: rem_feat.insert(0, 'The following features are not installed:') ret['comment'] = '\n- '.join(rem_feat) if not ret['changes']: return ret if __opts__['test']: ret['result'] = None return ret # Remove the features status = __salt__['win_servermanager.remove']( features, remove_payload=remove_payload, restart=restart) ret['result'] = status['Success'] # Some items failed to uninstall fail_feat = [] rem_feat = [] for feature in status['Features']: # Use get because sometimes 'Success' isn't defined such as when the # feature is already uninstalled if not status['Features'][feature].get('Success', True): # Show items that failed to uninstall fail_feat.append('- {0}'.format(feature)) else: # Show items that uninstalled rem_feat.append('- {0}'.format(feature)) if fail_feat: fail_feat.insert(0, 'Failed to remove the following:') if rem_feat: rem_feat.insert(0, 'Removed the following:') ret['comment'] = '\n'.join(fail_feat + rem_feat) # Get the changes new = __salt__['win_servermanager.list_installed']() ret['changes'] = 
salt.utils.data.compare_dicts(old, new) return ret
saltstack/salt
salt/states/win_servermanager.py
removed
python
def removed(name, features=None, remove_payload=False, restart=False): ''' Remove the windows feature To remove a single feature, use the ``name`` parameter. To remove multiple features, use the ``features`` parameter. Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma-delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to remove. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 remove_payload (Optional[bool]): True will cause the feature to be removed from the side-by-side store. To install the feature in the future you will need to specify the ``source`` restart (Optional[bool]): Restarts the computer when uninstall is complete if required by the role/feature uninstall. Default is False .. note:: Some features require a reboot after uninstall. If so the feature will not be completely uninstalled until the server is restarted. Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Uninstall the IIS Web Server Rol (Web-Server) IIS-WebserverRole: win_servermanager.removed: - name: Web-Server # Uninstall multiple features, reboot if required uninstall_multiple_features: win_servermanager.removed: - features: - RemoteAccess - XPX-Viewer - SNMP-Service - restart: True ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Check if features is not passed, use name. 
Split commas if features is None: features = name.split(',') # Make sure features is a list, split commas if not isinstance(features, list): features = features.split(',') # Determine if the feature is installed old = __salt__['win_servermanager.list_installed']() rem_feat = [] for feature in features: if feature in old: ret['changes'][feature] = 'Will be removed' else: rem_feat.append(feature) if rem_feat: rem_feat.insert(0, 'The following features are not installed:') ret['comment'] = '\n- '.join(rem_feat) if not ret['changes']: return ret if __opts__['test']: ret['result'] = None return ret # Remove the features status = __salt__['win_servermanager.remove']( features, remove_payload=remove_payload, restart=restart) ret['result'] = status['Success'] # Some items failed to uninstall fail_feat = [] rem_feat = [] for feature in status['Features']: # Use get because sometimes 'Success' isn't defined such as when the # feature is already uninstalled if not status['Features'][feature].get('Success', True): # Show items that failed to uninstall fail_feat.append('- {0}'.format(feature)) else: # Show items that uninstalled rem_feat.append('- {0}'.format(feature)) if fail_feat: fail_feat.insert(0, 'Failed to remove the following:') if rem_feat: rem_feat.insert(0, 'Removed the following:') ret['comment'] = '\n'.join(fail_feat + rem_feat) # Get the changes new = __salt__['win_servermanager.list_installed']() ret['changes'] = salt.utils.data.compare_dicts(old, new) return ret
Remove the windows feature To remove a single feature, use the ``name`` parameter. To remove multiple features, use the ``features`` parameter. Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma-delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to remove. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 remove_payload (Optional[bool]): True will cause the feature to be removed from the side-by-side store. To install the feature in the future you will need to specify the ``source`` restart (Optional[bool]): Restarts the computer when uninstall is complete if required by the role/feature uninstall. Default is False .. note:: Some features require a reboot after uninstall. If so the feature will not be completely uninstalled until the server is restarted. Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Uninstall the IIS Web Server Rol (Web-Server) IIS-WebserverRole: win_servermanager.removed: - name: Web-Server # Uninstall multiple features, reboot if required uninstall_multiple_features: win_servermanager.removed: - features: - RemoteAccess - XPX-Viewer - SNMP-Service - restart: True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_servermanager.py#L192-L320
null
# -*- coding: utf-8 -*- ''' Manage Windows features via the ServerManager powershell module. Can install and remove roles/features. :maintainer: Shane Lee <slee@saltstack.com> :platform: Windows Server 2008R2 or greater :depends: win_servermanager.install :depends: win_servermanager.remove ''' from __future__ import absolute_import, unicode_literals, print_function # Import salt modules import salt.utils.data import salt.utils.versions def __virtual__(): ''' Load only if win_servermanager is loaded ''' return 'win_servermanager' if 'win_servermanager.install' in __salt__ else False def installed(name, features=None, recurse=False, restart=False, source=None, exclude=None): ''' Install the windows feature. To install a single feature, use the ``name`` parameter. To install multiple features, use the ``features`` parameter. .. note:: Some features require reboot after un/installation. If so, until the server is restarted other features can not be installed! Args: name (str): Short name of the feature (the right column in win_servermanager.list_available). This can be a single feature or a string of features in a comma delimited list (no spaces) .. note:: A list is not allowed in the name parameter of any state. Use the ``features`` parameter if you want to pass the features as a list features (Optional[list]): A list of features to install. If this is passed it will be used instead of the ``name`` parameter. .. versionadded:: 2018.3.0 recurse (Optional[bool]): Install all sub-features as well. If the feature is installed but one of its sub-features are not installed set this will install additional sub-features source (Optional[str]): Path to the source files if missing from the target system. None means that the system will use windows update services to find the required files. Default is None restart (Optional[bool]): Restarts the computer when installation is complete, if required by the role/feature installed. 
Default is False exclude (Optional[str]): The name of the feature to exclude when installing the named feature. This can be a single feature, a string of features in a comma-delimited list (no spaces), or a list of features. .. warning:: As there is no exclude option for the ``Add-WindowsFeature`` or ``Install-WindowsFeature`` PowerShell commands the features named in ``exclude`` will be installed with other sub-features and will then be removed. **If the feature named in ``exclude`` is not a sub-feature of one of the installed items it will still be removed.** Example: Do not use the role or feature names mentioned in the PKGMGR documentation. To get a list of available roles and features run the following command: .. code-block:: bash salt <minion_name> win_servermanager.list_available Use the name in the right column of the results. .. code-block:: yaml # Installs the IIS Web Server Role (Web-Server) IIS-WebServerRole: win_servermanager.installed: - recurse: True - name: Web-Server # Install multiple features, exclude the Web-Service install_multiple_features: win_servermanager.installed: - recurse: True - features: - RemoteAccess - XPS-Viewer - SNMP-Service - exclude: - Web-Server ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Check if features is not passed, use name. 
Split commas if features is None: features = name.split(',') # Make sure features is a list, split commas if not isinstance(features, list): features = features.split(',') # Determine if the feature is installed old = __salt__['win_servermanager.list_installed']() cur_feat = [] for feature in features: if feature not in old: ret['changes'][feature] = \ 'Will be installed recurse={0}'.format(recurse) elif recurse: ret['changes'][feature] = \ 'Already installed but might install sub-features' else: cur_feat.append(feature) if cur_feat: cur_feat.insert(0, 'The following features are already installed:') ret['comment'] = '\n- '.join(cur_feat) if not ret['changes']: return ret if __opts__['test']: ret['result'] = None return ret # Install the features status = __salt__['win_servermanager.install']( features, recurse=recurse, restart=restart, source=source, exclude=exclude) ret['result'] = status['Success'] # Show items failed to install fail_feat = [] new_feat = [] rem_feat = [] for feature in status['Features']: # Features that failed to install or be removed if not status['Features'][feature].get('Success', True): fail_feat.append('- {0}'.format(feature)) # Features that installed elif '(exclude)' not in status['Features'][feature]['Message']: new_feat.append('- {0}'.format(feature)) # Show items that were removed because they were part of `exclude` elif '(exclude)' in status['Features'][feature]['Message']: rem_feat.append('- {0}'.format(feature)) if fail_feat: fail_feat.insert(0, 'Failed to install the following:') if new_feat: new_feat.insert(0, 'Installed the following:') if rem_feat: rem_feat.insert(0, 'Removed the following (exclude):') ret['comment'] = '\n'.join(fail_feat + new_feat + rem_feat) # Get the changes new = __salt__['win_servermanager.list_installed']() ret['changes'] = salt.utils.data.compare_dicts(old, new) return ret
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
_check_cors_origin
python
def _check_cors_origin(origin, allowed_origins): ''' Check if an origin match cors allowed origins ''' if isinstance(allowed_origins, list): if origin in allowed_origins: return origin elif allowed_origins == '*': return allowed_origins elif allowed_origins == origin: # Cors origin is either * or specific origin return allowed_origins
Check if an origin matches the CORS allowed origins
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1750-L1761
null
# encoding: utf-8 ''' A non-blocking REST API for Salt ================================ .. py:currentmodule:: salt.netapi.rest_tornado.saltnado :depends: - tornado Python module :configuration: All authentication is done through Salt's :ref:`external auth <acl-eauth>` system which requires additional configuration not described here. In order to run rest_tornado with the salt-master add the following to the Salt master config file. .. code-block:: yaml rest_tornado: # can be any port port: 8000 # address to bind to (defaults to 0.0.0.0) address: 0.0.0.0 # socket backlog backlog: 128 ssl_crt: /etc/pki/api/certs/server.crt # no need to specify ssl_key if cert and key # are in one single file ssl_key: /etc/pki/api/certs/server.key debug: False disable_ssl: False webhook_disable_auth: False cors_origin: null .. _rest_tornado-auth: Authentication -------------- Authentication is performed by passing a session token with each request. Tokens are generated via the :py:class:`SaltAuthHandler` URL. The token may be sent in one of two ways: * Include a custom header named :mailheader:`X-Auth-Token`. * Sent via a cookie. This option is a convenience for HTTP clients that automatically handle cookie support (such as browsers). .. seealso:: You can bypass the session handling via the :py:class:`RunSaltAPIHandler` URL. CORS ---- rest_tornado supports Cross-site HTTP requests out of the box. It is by default deactivated and controlled by the `cors_origin` config key. You can allow all origins by settings `cors_origin` to `*`. You can allow only one origin with this configuration: .. code-block:: yaml rest_tornado: cors_origin: http://salt.yourcompany.com You can also be more specific and select only a few allowed origins by using a list. For example: .. code-block:: yaml rest_tornado: cors_origin: - http://salt.yourcompany.com - http://salt-preprod.yourcampany.com The format for origin are full URL, with both scheme and port if not standard. 
In this case, rest_tornado will check if the Origin header is in the allowed list if it's the case allow the origin. Else it will returns nothing, effectively preventing the origin to make request. For reference, CORS is a mechanism used by browser to allow (or disallow) requests made from browser from a different origin than salt-api. It's complementary to Authentication and mandatory only if you plan to use a salt client developed as a Javascript browser application. Usage ----- Commands are sent to a running Salt master via this module by sending HTTP requests to the URLs detailed below. .. admonition:: Content negotiation This REST interface is flexible in what data formats it will accept as well as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded). * Specify the format of data in the request body by including the :mailheader:`Content-Type` header. * Specify the desired data format for the response body with the :mailheader:`Accept` header. Data sent in :http:method:`post` and :http:method:`put` requests must be in the format of a list of lowstate dictionaries. This allows multiple commands to be executed in a single HTTP request. .. glossary:: lowstate A dictionary containing various keys that instruct Salt which command to run, where that command lives, any parameters for that command, any authentication credentials, what returner to use, etc. Salt uses the lowstate data format internally in many places to pass command data between functions. Salt also uses lowstate for the :ref:`LocalClient() <python-api>` Python API interface. The following example (in JSON format) causes Salt to execute two commands:: [{ "client": "local", "tgt": "*", "fun": "test.fib", "arg": ["10"] }, { "client": "runner", "fun": "jobs.lookup_jid", "jid": "20130603122505459265" }] Multiple commands in a Salt API request will be executed in serial and makes no gaurantees that all commands will run. 
Meaning that if test.fib (from the example above) had an exception, the API would still execute "jobs.lookup_jid". Responses to these lowstates are an in-order list of dicts containing the return data, a yaml response could look like:: - ms-1: true ms-2: true - ms-1: foo ms-2: bar In the event of an exception while executing a command the return for that lowstate will be a string, for example if no minions matched the first lowstate we would get a return like:: - No minions matched the target. No command was sent, no jid was assigned. - ms-1: true ms-2: true .. admonition:: x-www-form-urlencoded Sending JSON or YAML in the request body is simple and most flexible, however sending data in urlencoded format is also supported with the caveats below. It is the default format for HTML forms, many JavaScript libraries, and the :command:`curl` command. For example, the equivalent to running ``salt '*' test.ping`` is sending ``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body. Caveats: * Only a single command may be sent per HTTP request. * Repeating the ``arg`` parameter multiple times will cause those parameters to be combined into a single list. Note, some popular frameworks and languages (notably jQuery, PHP, and Ruby on Rails) will automatically append empty brackets onto repeated parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``, ``arg[]=two``. This is not supported; send JSON or YAML instead. .. |req_token| replace:: a session token from :py:class:`~SaltAuthHandler`. .. |req_accept| replace:: the desired response format. .. |req_ct| replace:: the format of the request body. .. |res_ct| replace:: the format of the response body; depends on the :mailheader:`Accept` request header. .. |200| replace:: success .. |400| replace:: bad request .. |401| replace:: authentication required .. |406| replace:: requested Content-Type not available .. 
|500| replace:: internal server error ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import time import fnmatch import logging from copy import copy from collections import defaultdict # pylint: disable=import-error import cgi import tornado.escape import tornado.httpserver import tornado.ioloop import tornado.web import tornado.gen from tornado.concurrent import Future # pylint: enable=import-error # salt imports import salt.ext.six as six import salt.netapi import salt.utils.args import salt.utils.event import salt.utils.json import salt.utils.minions import salt.utils.yaml import salt.utils.zeromq from salt.utils.event import tagify import salt.client import salt.runner import salt.auth from salt.exceptions import ( AuthenticationError, AuthorizationError, EauthAuthenticationError ) salt.utils.zeromq.install_zmq() json = salt.utils.json.import_json() log = logging.getLogger(__name__) def _json_dumps(obj, **kwargs): ''' Invoke salt.utils.json.dumps using the alternate json module loaded using salt.utils.json.import_json(). This ensures that we properly encode any strings in the object before we perform the serialization. ''' return salt.utils.json.dumps(obj, _json_module=json, **kwargs) # The clients rest_cherrypi supports. We want to mimic the interface, but not # necessarily use the same API under the hood # # all of these require coordinating minion stuff # - "local" (done) # - "local_async" (done) # # master side # - "runner" (done) # - "wheel" (need asynchronous api...) 
AUTH_TOKEN_HEADER = 'X-Auth-Token' AUTH_COOKIE_NAME = 'session_id' class TimeoutException(Exception): pass class Any(Future): ''' Future that wraps other futures to "block" until one is done ''' def __init__(self, futures): # pylint: disable=E1002 super(Any, self).__init__() for future in futures: future.add_done_callback(self.done_callback) def done_callback(self, future): # Any is completed once one is done, we don't set for the rest if not self.done(): self.set_result(future) class EventListener(object): ''' Class responsible for listening to the salt master event bus and updating futures. This is the core of what makes this asynchronous, this allows us to do non-blocking work in the main processes and "wait" for an event to happen ''' def __init__(self, mod_opts, opts): self.mod_opts = mod_opts self.opts = opts self.event = salt.utils.event.get_event( 'master', opts['sock_dir'], opts['transport'], opts=opts, listen=True, io_loop=tornado.ioloop.IOLoop.current() ) # tag -> list of futures self.tag_map = defaultdict(list) # request_obj -> list of (tag, future) self.request_map = defaultdict(list) # map of future -> timeout_callback self.timeout_map = {} self.event.set_event_handler(self._handle_event_socket_recv) def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request] @staticmethod def prefix_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag.startswith(tag) @staticmethod def exact_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag == 
tag def get_event(self, request, tag='', matcher=prefix_matcher.__func__, callback=None, timeout=None ): ''' Get an event (asynchronous of course) return a future that will get it later ''' # if the request finished, no reason to allow event fetching, since we # can't send back to the client if request._finished: future = Future() future.set_exception(TimeoutException()) return future future = Future() if callback is not None: def handle_future(future): tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[(tag, matcher)].append(future) self.request_map[request].append((tag, matcher, future)) if timeout: timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future) self.timeout_map[future] = timeout_future return future def _timeout_future(self, tag, matcher, future): ''' Timeout a specific future ''' if (tag, matcher) not in self.tag_map: return if not future.done(): future.set_exception(TimeoutException()) self.tag_map[(tag, matcher)].remove(future) if not self.tag_map[(tag, matcher)]: del self.tag_map[(tag, matcher)] def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket ''' mtag, data = self.event.unpack(raw, self.event.serial) # see if we have any futures that need this info: for (tag, matcher), futures in six.iteritems(self.tag_map): try: is_matched = matcher(mtag, tag) except Exception: log.error('Failed to run a matcher.', exc_info=True) is_matched = False if not is_matched: continue for future in futures: if future.done(): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[(tag, matcher)].remove(future) if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', 
_json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish() class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Handler for login requests ''' def get(self): ''' All logins are done over post, this is a parked endpoint .. 
http:get:: /login :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: text GET /login HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 401 Unauthorized Content-Type: application/json Content-Length: 58 {"status": "401 Unauthorized", "return": "Please log in"} ''' self.set_status(401) self.set_header('WWW-Authenticate', 'Session') ret = {'status': '401 Unauthorized', 'return': 'Please log in'} self.write(self.serialize(ret)) # TODO: make asynchronous? Underlying library isn't... and we ARE making disk calls :( def post(self): ''' :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 400: |400| :status 401: |401| :status 406: |406| :status 500: |500| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. 
code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }} ''' try: if not isinstance(self.request_payload, dict): self.send_error(400) return creds = {'username': self.request_payload['username'], 'password': self.request_payload['password'], 'eauth': self.request_payload['eauth'], } # if any of the args are missing, its a bad request except KeyError: self.send_error(400) return token = self.application.auth.mk_token(creds) if 'token' not in token: # TODO: nicer error message # 'Could not authenticate using provided credentials') self.send_error(401) # return since we don't want to execute any more return # Grab eauth config for the current backend for the current user try: eauth = self.application.opts['external_auth'][token['eauth']] # Get sum of '*' perms, user-specific perms, and group-specific perms _perms = eauth.get(token['name'], []) _perms.extend(eauth.get('*', [])) if 'groups' in token and token['groups']: user_groups = set(token['groups']) eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')]) for group in user_groups & eauth_groups: _perms.extend(eauth['{0}%'.format(group)]) # dedup. perm can be a complex dict, so we cant use set perms = [] for perm in _perms: if perm not in perms: perms.append(perm) # If we can't find the creds, then they aren't authorized except KeyError: self.send_error(401) return except (AttributeError, IndexError): log.debug( "Configuration for external_auth malformed for eauth '%s', " "and user '%s'.", token.get('eauth'), token.get('name'), exc_info=True ) # TODO better error -- 'Configuration for external_auth could not be read.' 
self.send_error(500) return ret = {'return': [{ 'token': token['token'], 'expire': token['expire'], 'start': token['start'], 'user': token['name'], 'eauth': token['eauth'], 'perms': perms, }]} self.write(self.serialize(ret)) class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. 
code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not 
chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call class MinionSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A convenience endpoint for minion related functions ''' @tornado.web.asynchronous def get(self, mid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of minions or getting minion details .. 
http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: text GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ... ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.lowstate = [{ 'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items', }] self.disbatch() @tornado.web.asynchronous def post(self): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: text POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. 
code-block:: text HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # verify that all lowstates are the correct client type for low in self.lowstate: # if you didn't specify, its fine if 'client' not in low: low['client'] = 'local_async' continue # if you specified something else, we don't do that if low.get('client') != 'local_async': self.set_status(400) self.write('We don\'t serve your kind here') self.finish() return self.disbatch() class JobsSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A convenience endpoint for job cache data ''' @tornado.web.asynchronous def get(self, jid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: text GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: text GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. 
code-block:: text HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06 ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return if jid: self.lowstate = [{ 'fun': 'jobs.list_job', 'jid': jid, 'client': 'runner', }] else: self.lowstate = [{ 'fun': 'jobs.list_jobs', 'client': 'runner', }] self.disbatch() class RunSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' Endpoint to run commands without normal session handling ''' @tornado.web.asynchronous def post(self): ''' Run commands bypassing the :ref:`normal session handling <rest_cherrypy-auth>` .. http:post:: /run This entry point is primarily for "one-off" commands. Each request must pass full Salt authentication credentials. Otherwise this URL is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`. :term:`lowstate` data describing Salt commands must be sent in the request body. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sS localhost:8000/run \\ -H 'Accept: application/x-yaml' \\ -d client='local' \\ -d tgt='*' \\ -d fun='test.ping' \\ -d username='saltdev' \\ -d password='saltdev' \\ -d eauth='pam' .. code-block:: text POST /run HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 75 Content-Type: application/x-www-form-urlencoded client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam **Example response:** .. 
code-block:: text HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true ''' self.disbatch() class EventsSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' Expose the Salt event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. .. seealso:: :ref:`events` ''' @tornado.gen.coroutine def get(self): r''' An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -NsS localhost:8000/events .. code-block:: text GET /events HTTP/1.1 Host: localhost:8000 **Example response:** .. code-block:: text HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}} data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript # Note, you must be authenticated! var source = new EventSource('/events'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug(e.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource('/events', {withCredentials: true}); Some browser clients lack CORS support for the ``EventSource()`` API. Such clients may instead pass the :mailheader:`X-Auth-Token` value as an URL parameter: .. 
code-block:: bash curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl's ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. code-block:: bash curl -NsS localhost:8000/events |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } ' tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}} ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # set the streaming headers self.set_header('Content-Type', 'text/event-stream') self.set_header('Cache-Control', 'no-cache') self.set_header('Connection', 'keep-alive') self.write('retry: {0}\n'.format(400)) self.flush() while True: try: event = yield self.application.event_listener.get_event(self) self.write('tag: {0}\n'.format(event.get('tag', ''))) self.write(str('data: {0}\n\n').format(_json_dumps(event))) # future lint: disable=blacklisted-function self.flush() except TimeoutException: break class WebhookSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A generic web hook entry point that 
fires an event on Salt's event bus External services can POST data to this URL to trigger an event in Salt. For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks. .. note:: Be mindful of security Salt's Reactor can run any code. A Reactor SLS that responds to a hook event is responsible for validating that the event came from a trusted source and contains valid data. **This is a generic interface and securing it is up to you!** This URL requires authentication however not all external services can be configured to authenticate. For this reason authentication can be selectively disabled for this URL. Follow best practices -- always use SSL, pass a secret key, configure the firewall to only allow traffic from a known source, etc. The event data is taken from the request body. The :mailheader:`Content-Type` header is respected for the payload. The event tag is prefixed with ``salt/netapi/hook`` and the URL path is appended to the end. For example, a ``POST`` request sent to ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag ``salt/netapi/hook/mycompany/myapp/mydata``. The following is an example ``.travis.yml`` file to send notifications to Salt of successful test runs: .. code-block:: yaml language: python script: python -m unittest tests after_success: - 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"' .. seealso:: :ref:`events`, :ref:`reactor` ''' def post(self, tag_suffix=None): # pylint: disable=W0221 ''' Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!' .. code-block:: text POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. 
code-block:: text HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``http://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor: .. code-block:: text Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. code-block:: jinja {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %} ''' disable_auth = self.application.mod_opts.get('webhook_disable_auth') if not disable_auth and not self._verify_auth(): self.redirect('/login') return # if you have the tag, prefix tag = 'salt/netapi/hook' if tag_suffix: tag += tag_suffix # TODO: consolidate?? 
self.event = salt.utils.event.get_event( 'master', self.application.opts['sock_dir'], self.application.opts['transport'], opts=self.application.opts, listen=False) arguments = {} for argname in self.request.query_arguments: value = self.get_arguments(argname) if len(value) == 1: value = value[0] arguments[argname] = value ret = self.event.fire_event({ 'post': self.request_payload, 'get': arguments, # In Tornado >= v4.0.3, the headers come # back as an HTTPHeaders instance, which # is a dictionary. We must cast this as # a dictionary in order for msgpack to # serialize it. 'headers': dict(self.request.headers), }, tag) self.write(self.serialize({'success': ret}))
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventListener.clean_by_request
python
def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request]
Remove all futures that were waiting for request `request` since it is done waiting
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L302-L316
[ "def _timeout_future(self, tag, matcher, future):\n '''\n Timeout a specific future\n '''\n if (tag, matcher) not in self.tag_map:\n return\n if not future.done():\n future.set_exception(TimeoutException())\n self.tag_map[(tag, matcher)].remove(future)\n if not self.tag_map[(tag, matcher)]:\n del self.tag_map[(tag, matcher)]\n" ]
class EventListener(object): ''' Class responsible for listening to the salt master event bus and updating futures. This is the core of what makes this asynchronous, this allows us to do non-blocking work in the main processes and "wait" for an event to happen ''' def __init__(self, mod_opts, opts): self.mod_opts = mod_opts self.opts = opts self.event = salt.utils.event.get_event( 'master', opts['sock_dir'], opts['transport'], opts=opts, listen=True, io_loop=tornado.ioloop.IOLoop.current() ) # tag -> list of futures self.tag_map = defaultdict(list) # request_obj -> list of (tag, future) self.request_map = defaultdict(list) # map of future -> timeout_callback self.timeout_map = {} self.event.set_event_handler(self._handle_event_socket_recv) @staticmethod def prefix_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag.startswith(tag) @staticmethod def exact_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag == tag def get_event(self, request, tag='', matcher=prefix_matcher.__func__, callback=None, timeout=None ): ''' Get an event (asynchronous of course) return a future that will get it later ''' # if the request finished, no reason to allow event fetching, since we # can't send back to the client if request._finished: future = Future() future.set_exception(TimeoutException()) return future future = Future() if callback is not None: def handle_future(future): tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[(tag, matcher)].append(future) self.request_map[request].append((tag, matcher, future)) if timeout: timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future) self.timeout_map[future] = timeout_future return future def _timeout_future(self, tag, matcher, future): ''' Timeout a specific 
future ''' if (tag, matcher) not in self.tag_map: return if not future.done(): future.set_exception(TimeoutException()) self.tag_map[(tag, matcher)].remove(future) if not self.tag_map[(tag, matcher)]: del self.tag_map[(tag, matcher)] def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket ''' mtag, data = self.event.unpack(raw, self.event.serial) # see if we have any futures that need this info: for (tag, matcher), futures in six.iteritems(self.tag_map): try: is_matched = matcher(mtag, tag) except Exception: log.error('Failed to run a matcher.', exc_info=True) is_matched = False if not is_matched: continue for future in futures: if future.done(): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[(tag, matcher)].remove(future) if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future]
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventListener.get_event
python
def get_event(self, request, tag='', matcher=prefix_matcher.__func__, callback=None, timeout=None ): ''' Get an event (asynchronous of course) return a future that will get it later ''' # if the request finished, no reason to allow event fetching, since we # can't send back to the client if request._finished: future = Future() future.set_exception(TimeoutException()) return future future = Future() if callback is not None: def handle_future(future): tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[(tag, matcher)].append(future) self.request_map[request].append((tag, matcher, future)) if timeout: timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future) self.timeout_map[future] = timeout_future return future
Get an event (asynchronous of course) return a future that will get it later
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L330-L360
null
class EventListener(object): ''' Class responsible for listening to the salt master event bus and updating futures. This is the core of what makes this asynchronous, this allows us to do non-blocking work in the main processes and "wait" for an event to happen ''' def __init__(self, mod_opts, opts): self.mod_opts = mod_opts self.opts = opts self.event = salt.utils.event.get_event( 'master', opts['sock_dir'], opts['transport'], opts=opts, listen=True, io_loop=tornado.ioloop.IOLoop.current() ) # tag -> list of futures self.tag_map = defaultdict(list) # request_obj -> list of (tag, future) self.request_map = defaultdict(list) # map of future -> timeout_callback self.timeout_map = {} self.event.set_event_handler(self._handle_event_socket_recv) def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request] @staticmethod def prefix_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag.startswith(tag) @staticmethod def exact_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag == tag def _timeout_future(self, tag, matcher, future): ''' Timeout a specific future ''' if (tag, matcher) not in self.tag_map: return if not future.done(): future.set_exception(TimeoutException()) self.tag_map[(tag, matcher)].remove(future) if not self.tag_map[(tag, matcher)]: del self.tag_map[(tag, matcher)] def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket ''' mtag, data = self.event.unpack(raw, self.event.serial) # see if we have any 
futures that need this info: for (tag, matcher), futures in six.iteritems(self.tag_map): try: is_matched = matcher(mtag, tag) except Exception: log.error('Failed to run a matcher.', exc_info=True) is_matched = False if not is_matched: continue for future in futures: if future.done(): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[(tag, matcher)].remove(future) if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future]
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventListener._timeout_future
python
def _timeout_future(self, tag, matcher, future): ''' Timeout a specific future ''' if (tag, matcher) not in self.tag_map: return if not future.done(): future.set_exception(TimeoutException()) self.tag_map[(tag, matcher)].remove(future) if not self.tag_map[(tag, matcher)]: del self.tag_map[(tag, matcher)]
Timeout a specific future
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L362-L372
null
class EventListener(object): ''' Class responsible for listening to the salt master event bus and updating futures. This is the core of what makes this asynchronous, this allows us to do non-blocking work in the main processes and "wait" for an event to happen ''' def __init__(self, mod_opts, opts): self.mod_opts = mod_opts self.opts = opts self.event = salt.utils.event.get_event( 'master', opts['sock_dir'], opts['transport'], opts=opts, listen=True, io_loop=tornado.ioloop.IOLoop.current() ) # tag -> list of futures self.tag_map = defaultdict(list) # request_obj -> list of (tag, future) self.request_map = defaultdict(list) # map of future -> timeout_callback self.timeout_map = {} self.event.set_event_handler(self._handle_event_socket_recv) def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request] @staticmethod def prefix_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag.startswith(tag) @staticmethod def exact_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag == tag def get_event(self, request, tag='', matcher=prefix_matcher.__func__, callback=None, timeout=None ): ''' Get an event (asynchronous of course) return a future that will get it later ''' # if the request finished, no reason to allow event fetching, since we # can't send back to the client if request._finished: future = Future() future.set_exception(TimeoutException()) return future future = Future() if callback is not None: def handle_future(future): 
tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[(tag, matcher)].append(future) self.request_map[request].append((tag, matcher, future)) if timeout: timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future) self.timeout_map[future] = timeout_future return future def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket ''' mtag, data = self.event.unpack(raw, self.event.serial) # see if we have any futures that need this info: for (tag, matcher), futures in six.iteritems(self.tag_map): try: is_matched = matcher(mtag, tag) except Exception: log.error('Failed to run a matcher.', exc_info=True) is_matched = False if not is_matched: continue for future in futures: if future.done(): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[(tag, matcher)].remove(future) if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future]
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventListener._handle_event_socket_recv
python
def _handle_event_socket_recv(self, raw): ''' Callback for events on the event sub socket ''' mtag, data = self.event.unpack(raw, self.event.serial) # see if we have any futures that need this info: for (tag, matcher), futures in six.iteritems(self.tag_map): try: is_matched = matcher(mtag, tag) except Exception: log.error('Failed to run a matcher.', exc_info=True) is_matched = False if not is_matched: continue for future in futures: if future.done(): continue future.set_result({'data': data, 'tag': mtag}) self.tag_map[(tag, matcher)].remove(future) if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future]
Callback for events on the event sub socket
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L374-L398
null
class EventListener(object): ''' Class responsible for listening to the salt master event bus and updating futures. This is the core of what makes this asynchronous, this allows us to do non-blocking work in the main processes and "wait" for an event to happen ''' def __init__(self, mod_opts, opts): self.mod_opts = mod_opts self.opts = opts self.event = salt.utils.event.get_event( 'master', opts['sock_dir'], opts['transport'], opts=opts, listen=True, io_loop=tornado.ioloop.IOLoop.current() ) # tag -> list of futures self.tag_map = defaultdict(list) # request_obj -> list of (tag, future) self.request_map = defaultdict(list) # map of future -> timeout_callback self.timeout_map = {} self.event.set_event_handler(self._handle_event_socket_recv) def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request] @staticmethod def prefix_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag.startswith(tag) @staticmethod def exact_matcher(mtag, tag): if mtag is None or tag is None: raise TypeError('mtag or tag can not be None') return mtag == tag def get_event(self, request, tag='', matcher=prefix_matcher.__func__, callback=None, timeout=None ): ''' Get an event (asynchronous of course) return a future that will get it later ''' # if the request finished, no reason to allow event fetching, since we # can't send back to the client if request._finished: future = Future() future.set_exception(TimeoutException()) return future future = Future() if callback is not None: def handle_future(future): 
tornado.ioloop.IOLoop.current().add_callback(callback, future) future.add_done_callback(handle_future) # add this tag and future to the callbacks self.tag_map[(tag, matcher)].append(future) self.request_map[request].append((tag, matcher, future)) if timeout: timeout_future = tornado.ioloop.IOLoop.current().call_later(timeout, self._timeout_future, tag, matcher, future) self.timeout_map[future] = timeout_future return future def _timeout_future(self, tag, matcher, future): ''' Timeout a specific future ''' if (tag, matcher) not in self.tag_map: return if not future.done(): future.set_exception(TimeoutException()) self.tag_map[(tag, matcher)].remove(future) if not self.tag_map[(tag, matcher)]: del self.tag_map[(tag, matcher)]
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler._verify_client
python
def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True
Verify that the client is in fact one we have
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L407-L416
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.initialize
python
def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts)
Initialize the handler before requests are called
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L418-L440
[ "def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n" ]
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? 
so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = 
self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.token
python
def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME)
The token used for the request
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L443-L451
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler._verify_auth
python
def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token))
Boolean whether the request is auth'd
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L453-L458
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.prepare
python
def prepare(self): ''' Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate()
Run before get/posts etc. Pre-flight checks: - verify that we can speak back to them (compatible accept header)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L460-L490
[ "def _get_lowstate(self):\n '''\n Format the incoming data into a lowstate object\n '''\n if not self.request.body:\n return\n data = self.deserialize(self.request.body)\n self.request_payload = copy(data)\n\n if data and 'arg' in data and not isinstance(data['arg'], list):\n data['arg'] = [data['arg']]\n\n if not isinstance(data, list):\n lowstate = [data]\n else:\n lowstate = data\n\n return lowstate\n", "def find_acceptable_content_type(parsed_accept_header):\n for media_range in parsed_accept_header:\n for content_type, dumper in self.ct_out_map:\n if fnmatch.fnmatch(content_type, media_range):\n return content_type, dumper\n return None, None\n" ]
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? 
so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = 
self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.serialize
python
def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data)
Serlialize the output based on the Accept header
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L514-L520
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 
'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler._form_loader
python
def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data
function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L522-L534
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = 
cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.deserialize
python
def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400)
Deserialize the data based on request content type headers
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L536-L556
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if 
data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler._get_lowstate
python
def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate
Format the incoming data into a lowstate object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L558-L575
[ "def deserialize(self, data):\n '''\n Deserialize the data based on request content type headers\n '''\n ct_in_map = {\n 'application/x-www-form-urlencoded': self._form_loader,\n 'application/json': salt.utils.json.loads,\n 'application/x-yaml': salt.utils.yaml.safe_load,\n 'text/yaml': salt.utils.yaml.safe_load,\n # because people are terrible and don't mean what they say\n 'text/plain': salt.utils.json.loads\n }\n\n try:\n # Use cgi.parse_header to correctly separate parameters from value\n value, parameters = cgi.parse_header(self.request.headers['Content-Type'])\n return ct_in_map[value](tornado.escape.native_str(data))\n except KeyError:\n self.send_error(406)\n except ValueError:\n self.send_error(400)\n" ]
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin) def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.set_default_headers
python
def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin)
Set default CORS headers
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L577-L589
[ "def _check_cors_origin(origin, allowed_origins):\n '''\n Check if an origin match cors allowed origins\n '''\n if isinstance(allowed_origins, list):\n if origin in allowed_origins:\n return origin\n elif allowed_origins == '*':\n return allowed_origins\n elif allowed_origins == origin:\n # Cors origin is either * or specific origin\n return allowed_origins\n" ]
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
BaseSaltAPIHandler.options
python
def options(self, *args, **kwargs): ''' Return CORS headers for preflight requests ''' # Allow X-Auth-Token in requests request_headers = self.request.headers.get('Access-Control-Request-Headers') allowed_headers = request_headers.split(',') # Filter allowed header here if needed. # Allow request headers self.set_header('Access-Control-Allow-Headers', ','.join(allowed_headers)) # Allow X-Auth-Token in responses self.set_header('Access-Control-Expose-Headers', 'X-Auth-Token') # Allow all methods self.set_header('Access-Control-Allow-Methods', 'OPTIONS, GET, POST') self.set_status(204) self.finish()
Return CORS headers for preflight requests
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L591-L611
null
class BaseSaltAPIHandler(tornado.web.RequestHandler): # pylint: disable=W0223 ct_out_map = ( ('application/json', _json_dumps), ('application/x-yaml', salt.utils.yaml.safe_dump), ) def _verify_client(self, low): ''' Verify that the client is in fact one we have ''' if 'client' not in low or low.get('client') not in self.saltclients: self.set_status(400) self.write("400 Invalid Client: Client not found in salt clients") self.finish() return False return True def initialize(self): ''' Initialize the handler before requests are called ''' if not hasattr(self.application, 'event_listener'): log.debug('init a listener') self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, 'saltclients'): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { 'local': local_client.run_job_async, # not the actual client we'll use.. but its what we'll use to get args 'local_async': local_client.run_job_async, 'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async, 'runner_async': None, # empty, since we use the same client as `runner` } if not hasattr(self, 'ckminions'): self.ckminions = salt.utils.minions.CkMinions(self.application.opts) @property def token(self): ''' The token used for the request ''' # find the token (cookie or headers) if AUTH_TOKEN_HEADER in self.request.headers: return self.request.headers[AUTH_TOKEN_HEADER] else: return self.get_cookie(AUTH_COOKIE_NAME) def _verify_auth(self): ''' Boolean whether the request is auth'd ''' return self.token and bool(self.application.auth.get_tok(self.token)) def prepare(self): ''' Run before get/posts etc. 
Pre-flight checks: - verify that we can speak back to them (compatible accept header) ''' # Find an acceptable content-type accept_header = self.request.headers.get('Accept', '*/*') # Ignore any parameter, including q (quality) one parsed_accept_header = [cgi.parse_header(h)[0] for h in accept_header.split(',')] def find_acceptable_content_type(parsed_accept_header): for media_range in parsed_accept_header: for content_type, dumper in self.ct_out_map: if fnmatch.fnmatch(content_type, media_range): return content_type, dumper return None, None content_type, dumper = find_acceptable_content_type(parsed_accept_header) # better return message? if not content_type: self.send_error(406) self.content_type = content_type self.dumper = dumper # do the common parts self.start = time.time() self.connected = True self.lowstate = self._get_lowstate() def timeout_futures(self): ''' timeout a session ''' # TODO: set a header or something??? so we know it was a timeout self.application.event_listener.clean_by_request(self) def on_finish(self): ''' When the job has been done, lets cleanup ''' # timeout all the futures self.timeout_futures() # clear local_client objects to disconnect event publisher's IOStream connections del self.saltclients def on_connection_close(self): ''' If the client disconnects, lets close out ''' self.finish() def serialize(self, data): ''' Serlialize the output based on the Accept header ''' self.set_header('Content-Type', self.content_type) return self.dumper(data) def _form_loader(self, _): ''' function to get the data from the urlencoded forms ignore the data passed in and just get the args from wherever they are ''' data = {} for key in self.request.arguments: val = self.get_arguments(key) if len(val) == 1: data[key] = val[0] else: data[key] = val return data def deserialize(self, data): ''' Deserialize the data based on request content type headers ''' ct_in_map = { 'application/x-www-form-urlencoded': self._form_loader, 'application/json': 
salt.utils.json.loads, 'application/x-yaml': salt.utils.yaml.safe_load, 'text/yaml': salt.utils.yaml.safe_load, # because people are terrible and don't mean what they say 'text/plain': salt.utils.json.loads } try: # Use cgi.parse_header to correctly separate parameters from value value, parameters = cgi.parse_header(self.request.headers['Content-Type']) return ct_in_map[value](tornado.escape.native_str(data)) except KeyError: self.send_error(406) except ValueError: self.send_error(400) def _get_lowstate(self): ''' Format the incoming data into a lowstate object ''' if not self.request.body: return data = self.deserialize(self.request.body) self.request_payload = copy(data) if data and 'arg' in data and not isinstance(data['arg'], list): data['arg'] = [data['arg']] if not isinstance(data, list): lowstate = [data] else: lowstate = data return lowstate def set_default_headers(self): ''' Set default CORS headers ''' mod_opts = self.application.mod_opts if mod_opts.get('cors_origin'): origin = self.request.headers.get('Origin') allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin']) if allowed_origin: self.set_header("Access-Control-Allow-Origin", allowed_origin)
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAuthHandler.get
python
def get(self): ''' All logins are done over post, this is a parked endpoint .. http:get:: /login :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: text GET /login HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 401 Unauthorized Content-Type: application/json Content-Length: 58 {"status": "401 Unauthorized", "return": "Please log in"} ''' self.set_status(401) self.set_header('WWW-Authenticate', 'Session') ret = {'status': '401 Unauthorized', 'return': 'Please log in'} self.write(self.serialize(ret))
All logins are done over post, this is a parked endpoint .. http:get:: /login :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: text GET /login HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 401 Unauthorized Content-Type: application/json Content-Length: 58 {"status": "401 Unauthorized", "return": "Please log in"}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L618-L655
[ "def serialize(self, data):\n '''\n Serlialize the output based on the Accept header\n '''\n self.set_header('Content-Type', self.content_type)\n\n return self.dumper(data)\n" ]
class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Handler for login requests ''' # TODO: make asynchronous? Underlying library isn't... and we ARE making disk calls :( def post(self): ''' :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 400: |400| :status 401: |401| :status 406: |406| :status 500: |500| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }} ''' try: if not isinstance(self.request_payload, dict): self.send_error(400) return creds = {'username': self.request_payload['username'], 'password': self.request_payload['password'], 'eauth': self.request_payload['eauth'], } # if any of the args are missing, its a bad request except KeyError: self.send_error(400) return token = self.application.auth.mk_token(creds) if 'token' not in token: # TODO: nicer error message # 'Could not authenticate using provided credentials') self.send_error(401) # return since we don't want to execute any more return # Grab eauth config for the current backend for the current user try: 
eauth = self.application.opts['external_auth'][token['eauth']] # Get sum of '*' perms, user-specific perms, and group-specific perms _perms = eauth.get(token['name'], []) _perms.extend(eauth.get('*', [])) if 'groups' in token and token['groups']: user_groups = set(token['groups']) eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')]) for group in user_groups & eauth_groups: _perms.extend(eauth['{0}%'.format(group)]) # dedup. perm can be a complex dict, so we cant use set perms = [] for perm in _perms: if perm not in perms: perms.append(perm) # If we can't find the creds, then they aren't authorized except KeyError: self.send_error(401) return except (AttributeError, IndexError): log.debug( "Configuration for external_auth malformed for eauth '%s', " "and user '%s'.", token.get('eauth'), token.get('name'), exc_info=True ) # TODO better error -- 'Configuration for external_auth could not be read.' self.send_error(500) return ret = {'return': [{ 'token': token['token'], 'expire': token['expire'], 'start': token['start'], 'user': token['name'], 'eauth': token['eauth'], 'perms': perms, }]} self.write(self.serialize(ret))
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAuthHandler.post
python
def post(self): ''' :ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 400: |400| :status 401: |401| :status 406: |406| :status 500: |500| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }} ''' try: if not isinstance(self.request_payload, dict): self.send_error(400) return creds = {'username': self.request_payload['username'], 'password': self.request_payload['password'], 'eauth': self.request_payload['eauth'], } # if any of the args are missing, its a bad request except KeyError: self.send_error(400) return token = self.application.auth.mk_token(creds) if 'token' not in token: # TODO: nicer error message # 'Could not authenticate using provided credentials') self.send_error(401) # return since we don't want to execute any more return # Grab eauth config for the current backend for the current user try: eauth = self.application.opts['external_auth'][token['eauth']] # Get sum of '*' perms, user-specific perms, and group-specific perms _perms = eauth.get(token['name'], []) 
_perms.extend(eauth.get('*', [])) if 'groups' in token and token['groups']: user_groups = set(token['groups']) eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')]) for group in user_groups & eauth_groups: _perms.extend(eauth['{0}%'.format(group)]) # dedup. perm can be a complex dict, so we cant use set perms = [] for perm in _perms: if perm not in perms: perms.append(perm) # If we can't find the creds, then they aren't authorized except KeyError: self.send_error(401) return except (AttributeError, IndexError): log.debug( "Configuration for external_auth malformed for eauth '%s', " "and user '%s'.", token.get('eauth'), token.get('name'), exc_info=True ) # TODO better error -- 'Configuration for external_auth could not be read.' self.send_error(500) return ret = {'return': [{ 'token': token['token'], 'expire': token['expire'], 'start': token['start'], 'user': token['name'], 'eauth': token['eauth'], 'perms': perms, }]} self.write(self.serialize(ret))
:ref:`Authenticate <rest_tornado-auth>` against Salt's eauth system .. http:post:: /login :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :form eauth: the eauth backend configured for the user :form username: username :form password: password :status 200: |200| :status 400: |400| :status 401: |401| :status 406: |406| :status 500: |500| **Example request:** .. code-block:: bash curl -si localhost:8000/login \\ -H "Accept: application/json" \\ -d username='saltuser' \\ -d password='saltpass' \\ -d eauth='pam' .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Content-Length: 42 Content-Type: application/x-www-form-urlencoded Accept: application/json username=saltuser&password=saltpass&eauth=pam **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Length: 206 X-Auth-Token: 6d1b722e Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/ {"return": { "token": "6d1b722e", "start": 1363805943.776223, "expire": 1363849143.776224, "user": "saltuser", "eauth": "pam", "perms": [ "grains.*", "status.*", "sys.*", "test.*" ] }}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L658-L788
[ "def serialize(self, data):\n '''\n Serlialize the output based on the Accept header\n '''\n self.set_header('Content-Type', self.content_type)\n\n return self.dumper(data)\n" ]
class SaltAuthHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Handler for login requests ''' def get(self): ''' All logins are done over post, this is a parked endpoint .. http:get:: /login :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/login .. code-block:: text GET /login HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 401 Unauthorized Content-Type: application/json Content-Length: 58 {"status": "401 Unauthorized", "return": "Please log in"} ''' self.set_status(401) self.set_header('WWW-Authenticate', 'Session') ret = {'status': '401 Unauthorized', 'return': 'Please log in'} self.write(self.serialize(ret)) # TODO: make asynchronous? Underlying library isn't... and we ARE making disk calls :(
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler.get
python
def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret))
An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L795-L831
[ "def serialize(self, data):\n '''\n Serlialize the output based on the Accept header\n '''\n self.set_header('Content-Type', self.content_type)\n\n return self.dumper(data)\n" ]
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. 
''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, 
syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler.disbatch
python
def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish()
Disbatch all lowstates to the appropriate clients
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L912-L946
[ "def _verify_client(self, low):\n '''\n Verify that the client is in fact one we have\n '''\n if 'client' not in low or low.get('client') not in self.saltclients:\n self.set_status(400)\n self.write(\"400 Invalid Client: Client not found in salt clients\")\n self.finish()\n return False\n return True\n", "def serialize(self, data):\n '''\n Serlialize the output based on the Accept header\n '''\n self.set_header('Content-Type', self.content_type)\n\n return self.dumper(data)\n" ]
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. 
No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler._disbatch_local
python
def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break
Dispatch local client commands
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L949-L1070
null
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, 
timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler.job_not_running
python
def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True
Return a future which will complete once jid (passed in) is no longer running on tgt
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1073-L1111
[ "def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n" ]
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = 
self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise 
tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler._disbatch_local_async
python
def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data)
Disbatch local client_async commands
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1114-L1122
null
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = 
self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler._disbatch_runner
python
def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute')
Disbatch runner client commands
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1125-L1139
null
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = 
self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine @tornado.gen.coroutine def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data) # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
SaltAPIHandler._disbatch_runner_async
python
def _disbatch_runner_async(self, chunk): ''' Disbatch runner client_async commands ''' pub_data = self.saltclients['runner'](chunk) raise tornado.gen.Return(pub_data)
Disbatch runner client_async commands
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1142-L1147
null
class SaltAPIHandler(BaseSaltAPIHandler): # pylint: disable=W0223 ''' Main API handler for base "/" ''' def get(self): ''' An endpoint to determine salt-api capabilities .. http:get:: / :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000 .. code-block:: text GET / HTTP/1.1 Host: localhost:8000 Accept: application/json **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Type: application/json Content-Legnth: 83 {"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"} ''' ret = {"clients": list(self.saltclients.keys()), "return": "Welcome"} self.write(self.serialize(ret)) @tornado.web.asynchronous def post(self): ''' Send one or more Salt commands (lowstates) in the request body .. http:post:: / :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. **Example request:** .. code-block:: bash curl -si https://localhost:8000 \\ -H "Accept: application/x-yaml" \\ -H "X-Auth-Token: d40d1e1e" \\ -d client=local \\ -d tgt='*' \\ -d fun='test.ping' \\ -d arg .. code-block:: text POST / HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml X-Auth-Token: d40d1e1e Content-Length: 36 Content-Type: application/x-www-form-urlencoded fun=test.ping&arg&client=local&tgt=* **Example response:** Responses are an in-order list of the lowstate's return data. In the event of an exception running a command the return will be a string instead of a mapping. .. code-block:: text HTTP/1.1 200 OK Content-Length: 200 Allow: GET, HEAD, POST Content-Type: application/x-yaml return: - ms-0: true ms-1: true ms-2: true ms-3: true ms-4: true .. 
admonition:: multiple commands Note that if multiple :term:`lowstate` structures are sent, the Salt API will execute them in serial, and will not stop execution upon failure of a previous job. If you need to have commands executed in order and stop on failure please use compound-command-execution. ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.disbatch() @tornado.gen.coroutine def disbatch(self): ''' Disbatch all lowstates to the appropriate clients ''' ret = [] # check clients before going, we want to throw 400 if one is bad for low in self.lowstate: if not self._verify_client(low): return # Make sure we have 'token' or 'username'/'password' in each low chunk. # Salt will verify the credentials are correct. if self.token is not None and 'token' not in low: low['token'] = self.token if not (('token' in low) or ('username' in low and 'password' in low and 'eauth' in low)): ret.append('Failed to authenticate') break # disbatch to the correct handler try: chunk_ret = yield getattr(self, '_disbatch_{0}'.format(low['client']))(low) ret.append(chunk_ret) except (AuthenticationError, AuthorizationError, EauthAuthenticationError): ret.append('Failed to authenticate') break except Exception as ex: ret.append('Unexpected exception while handling request: {0}'.format(ex)) log.error('Unexpected exception while handling request:', exc_info=True) if not self._finished: self.write(self.serialize({'return': ret})) self.finish() @tornado.gen.coroutine def _disbatch_local(self, chunk): ''' Dispatch local client commands ''' # Generate jid and find all minions before triggering a job to subscribe all returns from minions full_return = chunk.pop('full_return', False) chunk['jid'] = salt.utils.jid.gen_jid(self.application.opts) if not chunk.get('jid', None) else chunk['jid'] minions = set(self.ckminions.check_minions(chunk['tgt'], chunk.get('tgt_type', 'glob'))) def subscribe_minion(minion): salt_evt = 
self.application.event_listener.get_event( self, tag='salt/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) syndic_evt = self.application.event_listener.get_event( self, tag='syndic/job/{}/ret/{}'.format(chunk['jid'], minion), matcher=EventListener.exact_matcher) return salt_evt, syndic_evt # start listening for the event before we fire the job to avoid races events = [] for minion in minions: salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local'](*f_call.get('args', ()), **f_call.get('kwargs', {})) # if the job didn't publish, lets not wait around for nothing # TODO: set header?? if 'jid' not in pub_data: for future in events: try: future.set_result(None) except Exception: pass raise tornado.gen.Return('No minions matched the target. No command was sent, no jid was assigned.') # get_event for missing minion for minion in list(set(pub_data['minions']) - set(minions)): salt_evt, syndic_evt = subscribe_minion(minion) events.append(salt_evt) events.append(syndic_evt) # Map of minion_id -> returned for all minions we think we need to wait on minions = {m: False for m in pub_data['minions']} # minimum time required for return to complete. 
By default no waiting, if # we are a syndic then we must wait syndic_wait at a minimum min_wait_time = Future() min_wait_time.set_result(True) # wait syndic a while to avoid missing published events if self.application.opts['order_masters']: min_wait_time = tornado.gen.sleep(self.application.opts['syndic_wait']) # To ensure job_not_running and all_return are terminated by each other, communicate using a future is_timed_out = tornado.gen.sleep(self.application.opts['gather_job_timeout']) is_finished = Future() # ping until the job is not running, while doing so, if we see new minions returning # that they are running the job, add them to the list tornado.ioloop.IOLoop.current().spawn_callback(self.job_not_running, pub_data['jid'], chunk['tgt'], f_call['kwargs']['tgt_type'], minions, is_finished) def more_todo(): ''' Check if there are any more minions we are waiting on returns from ''' return any(x is False for x in six.itervalues(minions)) # here we want to follow the behavior of LocalClient.get_iter_returns # namely we want to wait at least syndic_wait (assuming we are a syndic) # and that there are no more jobs running on minions. 
We are allowed to exit # early if gather_job_timeout has been exceeded chunk_ret = {} while True: to_wait = events+[is_finished, is_timed_out] if not min_wait_time.done(): to_wait += [min_wait_time] def cancel_inflight_futures(): for event in to_wait: if not event.done() and event is not is_timed_out: event.set_result(None) f = yield Any(to_wait) try: # When finished entire routine, cleanup other futures and return result if f is is_finished or f is is_timed_out: cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) elif f is min_wait_time: if not more_todo(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) continue f_result = f.result() if f in events: events.remove(f) # if this is a start, then we need to add it to the pile if f_result['tag'].endswith('/new'): for minion_id in f_result['data']['minions']: if minion_id not in minions: minions[minion_id] = False else: chunk_ret[f_result['data']['id']] = f_result if full_return else f_result['data']['return'] # clear finished event future minions[f_result['data']['id']] = True # if there are no more minions to wait for, then we are done if not more_todo() and min_wait_time.done(): cancel_inflight_futures() raise tornado.gen.Return(chunk_ret) except TimeoutException: break @tornado.gen.coroutine def job_not_running(self, jid, tgt, tgt_type, minions, is_finished): ''' Return a future which will complete once jid (passed in) is no longer running on tgt ''' ping_pub_data = yield self.saltclients['local'](tgt, 'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False while True: try: event = self.application.event_listener.get_event(self, tag=ping_tag, timeout=self.application.opts['gather_job_timeout']) event = yield event except TimeoutException: if not event.done(): event.set_result(None) if not minion_running or is_finished.done(): raise tornado.gen.Return(True) else: ping_pub_data = yield self.saltclients['local'](tgt, 
'saltutil.find_job', [jid], tgt_type=tgt_type) ping_tag = tagify([ping_pub_data['jid'], 'ret'], 'job') minion_running = False continue # Minions can return, we want to see if the job is running... if event['data'].get('return', {}) == {}: continue if event['data']['id'] not in minions: minions[event['data']['id']] = False minion_running = True @tornado.gen.coroutine def _disbatch_local_async(self, chunk): ''' Disbatch local client_async commands ''' f_call = self._format_call_run_job_async(chunk) # fire a job off pub_data = yield self.saltclients['local_async'](*f_call.get('args', ()), **f_call.get('kwargs', {})) raise tornado.gen.Return(pub_data) @tornado.gen.coroutine def _disbatch_runner(self, chunk): ''' Disbatch runner client commands ''' full_return = chunk.pop('full_return', False) pub_data = self.saltclients['runner'](chunk) tag = pub_data['tag'] + '/ret' try: event = yield self.application.event_listener.get_event(self, tag=tag) # only return the return data ret = event if full_return else event['data']['return'] raise tornado.gen.Return(ret) except TimeoutException: raise tornado.gen.Return('Timeout waiting for runner to execute') @tornado.gen.coroutine # salt.utils.args.format_call doesn't work for functions having the # annotation tornado.gen.coroutine def _format_call_run_job_async(self, chunk): f_call = salt.utils.args.format_call( salt.client.LocalClient.run_job, chunk, is_class_method=True) f_call.get('kwargs', {})['io_loop'] = tornado.ioloop.IOLoop.current() return f_call
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
MinionSaltAPIHandler.get
python
def get(self, mid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: text GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ... ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.lowstate = [{ 'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items', }] self.disbatch()
A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: text GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ...
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1165-L1214
[ "def _verify_auth(self):\n '''\n Boolean whether the request is auth'd\n '''\n\n return self.token and bool(self.application.auth.get_tok(self.token))\n" ]
class MinionSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A convenience endpoint for minion related functions ''' @tornado.web.asynchronous @tornado.web.asynchronous def post(self): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: text POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: text HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # verify that all lowstates are the correct client type for low in self.lowstate: # if you didn't specify, its fine if 'client' not in low: low['client'] = 'local_async' continue # if you specified something else, we don't do that if low.get('client') != 'local_async': self.set_status(400) self.write('We don\'t serve your kind here') self.finish() return self.disbatch()
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
MinionSaltAPIHandler.post
python
def post(self): ''' Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: text POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: text HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0] ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # verify that all lowstates are the correct client type for low in self.lowstate: # if you didn't specify, its fine if 'client' not in low: low['client'] = 'local_async' continue # if you specified something else, we don't do that if low.get('client') != 'local_async': self.set_status(400) self.write('We don\'t serve your kind here') self.finish() return self.disbatch()
Start an execution command and immediately return the job id .. http:post:: /minions :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :reqheader Content-Type: |req_ct| :resheader Content-Type: |res_ct| :status 200: |200| :status 401: |401| :status 406: |406| :term:`lowstate` data describing Salt commands must be sent in the request body. The ``client`` option will be set to :py:meth:`~salt.client.LocalClient.local_async`. **Example request:** .. code-block:: bash curl -sSi localhost:8000/minions \\ -H "Accept: application/x-yaml" \\ -d tgt='*' \\ -d fun='status.diskusage' .. code-block:: text POST /minions HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml Content-Length: 26 Content-Type: application/x-www-form-urlencoded tgt=*&fun=status.diskusage **Example response:** .. code-block:: text HTTP/1.1 202 Accepted Content-Length: 86 Content-Type: application/x-yaml return: - jid: '20130603122505459265' minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1217-L1286
[ "def _verify_auth(self):\n '''\n Boolean whether the request is auth'd\n '''\n\n return self.token and bool(self.application.auth.get_tok(self.token))\n" ]
class MinionSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A convenience endpoint for minion related functions ''' @tornado.web.asynchronous def get(self, mid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of minions or getting minion details .. http:get:: /minions/(mid) :reqheader X-Auth-Token: |req_token| :reqheader Accept: |req_accept| :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/minions/ms-3 .. code-block:: text GET /minions/ms-3 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 129005 Content-Type: application/x-yaml return: - ms-3: grains.items: ... ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return self.lowstate = [{ 'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items', }] self.disbatch() @tornado.web.asynchronous
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
JobsSaltAPIHandler.get
python
def get(self, jid=None): # pylint: disable=W0221 ''' A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: text GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: text GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06 ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return if jid: self.lowstate = [{ 'fun': 'jobs.list_job', 'jid': jid, 'client': 'runner', }] else: self.lowstate = [{ 'fun': 'jobs.list_jobs', 'client': 'runner', }] self.disbatch()
A convenience URL for getting lists of previously run jobs or getting the return from a single job .. http:get:: /jobs/(jid) List jobs or show a single job from the job cache. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -i localhost:8000/jobs .. code-block:: text GET /jobs HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 165 Content-Type: application/x-yaml return: - '20121130104633606931': Arguments: - '3' Function: test.fib Start Time: 2012, Nov 30 10:46:33.606931 Target: jerry Target-type: glob **Example request:** .. code-block:: bash curl -i localhost:8000/jobs/20121130104633606931 .. code-block:: text GET /jobs/20121130104633606931 HTTP/1.1 Host: localhost:8000 Accept: application/x-yaml **Example response:** .. code-block:: text HTTP/1.1 200 OK Content-Length: 73 Content-Type: application/x-yaml info: - Arguments: - '3' Function: test.fib Minions: - jerry Start Time: 2012, Nov 30 10:46:33.606931 Target: '*' Target-type: glob User: saltdev jid: '20121130104633606931' return: - jerry: - - 0 - 1 - 1 - 2 - 6.9141387939453125e-06
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1294-L1392
[ "def _verify_auth(self):\n '''\n Boolean whether the request is auth'd\n '''\n\n return self.token and bool(self.application.auth.get_tok(self.token))\n" ]
class JobsSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A convenience endpoint for job cache data ''' @tornado.web.asynchronous
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
EventsSaltAPIHandler.get
python
def get(self): r''' An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -NsS localhost:8000/events .. code-block:: text GET /events HTTP/1.1 Host: localhost:8000 **Example response:** .. code-block:: text HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}} data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript # Note, you must be authenticated! var source = new EventSource('/events'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug(e.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource('/events', {withCredentials: true}); Some browser clients lack CORS support for the ``EventSource()`` API. Such clients may instead pass the :mailheader:`X-Auth-Token` value as an URL parameter: .. code-block:: bash curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl's ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. 
code-block:: bash curl -NsS localhost:8000/events |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } ' tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}} ''' # if you aren't authenticated, redirect to login if not self._verify_auth(): self.redirect('/login') return # set the streaming headers self.set_header('Content-Type', 'text/event-stream') self.set_header('Cache-Control', 'no-cache') self.set_header('Connection', 'keep-alive') self.write('retry: {0}\n'.format(400)) self.flush() while True: try: event = yield self.application.event_listener.get_event(self) self.write('tag: {0}\n'.format(event.get('tag', ''))) self.write(str('data: {0}\n\n').format(_json_dumps(event))) # future lint: disable=blacklisted-function self.flush() except TimeoutException: break
r''' An HTTP stream of the Salt master event bus This stream is formatted per the Server Sent Events (SSE) spec. Each event is formatted as JSON. .. http:get:: /events :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -NsS localhost:8000/events .. code-block:: text GET /events HTTP/1.1 Host: localhost:8000 **Example response:** .. code-block:: text HTTP/1.1 200 OK Connection: keep-alive Cache-Control: no-cache Content-Type: text/event-stream;charset=utf-8 retry: 400 data: {'tag': '', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}} data: {'tag': '20130802115730568475', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}} The event stream can be easily consumed via JavaScript: .. code-block:: javascript # Note, you must be authenticated! var source = new EventSource('/events'); source.onopen = function() { console.debug('opening') }; source.onerror = function(e) { console.debug('error!', e) }; source.onmessage = function(e) { console.debug(e.data) }; Or using CORS: .. code-block:: javascript var source = new EventSource('/events', {withCredentials: true}); Some browser clients lack CORS support for the ``EventSource()`` API. Such clients may instead pass the :mailheader:`X-Auth-Token` value as an URL parameter: .. code-block:: bash curl -NsS localhost:8000/events/6d1b722e It is also possible to consume the stream via the shell. Records are separated by blank lines; the ``data:`` and ``tag:`` prefixes will need to be removed manually before attempting to unserialize the JSON. curl's ``-N`` flag turns off input buffering which is required to process the stream incrementally. Here is a basic example of printing each event as it comes in: .. code-block:: bash curl -NsS localhost:8000/events |\ while IFS= read -r line ; do echo $line done Here is an example of using awk to filter events based on tag: .. 
code-block:: bash curl -NsS localhost:8000/events |\ awk ' BEGIN { RS=""; FS="\\n" } $1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 } ' tag: salt/job/20140112010149808995/new data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}} tag: 20140112010149808995 data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1471-L1584
[ "def _json_dumps(obj, **kwargs):\n '''\n Invoke salt.utils.json.dumps using the alternate json module loaded using\n salt.utils.json.import_json(). This ensures that we properly encode any\n strings in the object before we perform the serialization.\n '''\n return salt.utils.json.dumps(obj, _json_module=json, **kwargs)\n", "def _verify_auth(self):\n '''\n Boolean whether the request is auth'd\n '''\n\n return self.token and bool(self.application.auth.get_tok(self.token))\n" ]
class EventsSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' Expose the Salt event bus The event bus on the Salt master exposes a large variety of things, notably when executions are started on the master and also when minions ultimately return their results. This URL provides a real-time window into a running Salt infrastructure. .. seealso:: :ref:`events` ''' @tornado.gen.coroutine
saltstack/salt
salt/netapi/rest_tornado/saltnado.py
WebhookSaltAPIHandler.post
python
def post(self, tag_suffix=None): # pylint: disable=W0221 ''' Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!' .. code-block:: text POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. code-block:: text HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``http://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor: .. code-block:: text Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. 
code-block:: jinja {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %} ''' disable_auth = self.application.mod_opts.get('webhook_disable_auth') if not disable_auth and not self._verify_auth(): self.redirect('/login') return # if you have the tag, prefix tag = 'salt/netapi/hook' if tag_suffix: tag += tag_suffix # TODO: consolidate?? self.event = salt.utils.event.get_event( 'master', self.application.opts['sock_dir'], self.application.opts['transport'], opts=self.application.opts, listen=False) arguments = {} for argname in self.request.query_arguments: value = self.get_arguments(argname) if len(value) == 1: value = value[0] arguments[argname] = value ret = self.event.fire_event({ 'post': self.request_payload, 'get': arguments, # In Tornado >= v4.0.3, the headers come # back as an HTTPHeaders instance, which # is a dictionary. We must cast this as # a dictionary in order for msgpack to # serialize it. 'headers': dict(self.request.headers), }, tag) self.write(self.serialize({'success': ret}))
Fire an event in Salt with a custom event tag and data .. http:post:: /hook :status 200: |200| :status 401: |401| :status 406: |406| :status 413: request body is too large **Example request:** .. code-block:: bash curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!' .. code-block:: text POST /hook HTTP/1.1 Host: localhost:8000 Content-Length: 16 Content-Type: application/x-www-form-urlencoded foo=Foo&bar=Bar! **Example response**: .. code-block:: text HTTP/1.1 200 OK Content-Length: 14 Content-Type: application/json {"success": true} As a practical example, an internal continuous-integration build server could send an HTTP POST request to the URL ``http://localhost:8000/hook/mycompany/build/success`` which contains the result of a build and the SHA of the version that was built as JSON. That would then produce the following event in Salt that could be used to kick off a deployment via Salt's Reactor: .. code-block:: text Event fired at Fri Feb 14 17:40:11 2014 ************************* Tag: salt/netapi/hook/mycompany/build/success Data: {'_stamp': '2014-02-14_17:40:11.440996', 'headers': { 'X-My-Secret-Key': 'F0fAgoQjIT@W', 'Content-Length': '37', 'Content-Type': 'application/json', 'Host': 'localhost:8000', 'Remote-Addr': '127.0.0.1'}, 'post': {'revision': 'aa22a3c4b2e7', 'result': True}} Salt's Reactor could listen for the event: .. code-block:: yaml reactor: - 'salt/netapi/hook/mycompany/build/*': - /srv/reactor/react_ci_builds.sls And finally deploy the new build: .. code-block:: jinja {% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %} {% set build = data.get('post', {}) %} {% if secret_key == 'F0fAgoQjIT@W' and build.result == True %} deploy_my_app: cmd.state.sls: - tgt: 'application*' - arg: - myapp.deploy - kwarg: pillar: revision: {{ revision }} {% endif %}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/saltnado.py#L1628-L1747
[ "def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. 
The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n", "def _verify_auth(self):\n '''\n Boolean whether the request is auth'd\n '''\n\n return self.token and bool(self.application.auth.get_tok(self.token))\n", "def serialize(self, data):\n '''\n Serlialize the output based on the Accept header\n '''\n self.set_header('Content-Type', self.content_type)\n\n return self.dumper(data)\n" ]
class WebhookSaltAPIHandler(SaltAPIHandler): # pylint: disable=W0223 ''' A generic web hook entry point that fires an event on Salt's event bus External services can POST data to this URL to trigger an event in Salt. For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks. .. note:: Be mindful of security Salt's Reactor can run any code. A Reactor SLS that responds to a hook event is responsible for validating that the event came from a trusted source and contains valid data. **This is a generic interface and securing it is up to you!** This URL requires authentication however not all external services can be configured to authenticate. For this reason authentication can be selectively disabled for this URL. Follow best practices -- always use SSL, pass a secret key, configure the firewall to only allow traffic from a known source, etc. The event data is taken from the request body. The :mailheader:`Content-Type` header is respected for the payload. The event tag is prefixed with ``salt/netapi/hook`` and the URL path is appended to the end. For example, a ``POST`` request sent to ``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag ``salt/netapi/hook/mycompany/myapp/mydata``. The following is an example ``.travis.yml`` file to send notifications to Salt of successful test runs: .. code-block:: yaml language: python script: python -m unittest tests after_success: - 'curl -sS http://saltapi-url.example.com:8000/hook/travis/build/success -d branch="${TRAVIS_BRANCH}" -d commit="${TRAVIS_COMMIT}"' .. seealso:: :ref:`events`, :ref:`reactor` '''
saltstack/salt
salt/beacons/proxy_example.py
beacon
python
def beacon(config): ''' Called several times each second https://docs.saltstack.com/en/latest/topics/beacons/#the-beacon-function .. code-block:: yaml beacons: proxy_example: - endpoint: beacon ''' # Important!!! # Although this toy example makes an HTTP call # to get beacon information # please be advised that doing CPU or IO intensive # operations in this method will cause the beacon loop # to block. _config = {} list(map(_config.update, config)) beacon_url = '{0}{1}'.format(__opts__['proxy']['url'], _config['endpoint']) ret = salt.utils.http.query(beacon_url, decode_type='json', decode=True) return [ret['dict']]
Called several times each second https://docs.saltstack.com/en/latest/topics/beacons/#the-beacon-function .. code-block:: yaml beacons: proxy_example: - endpoint: beacon
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/beacons/proxy_example.py#L46-L71
null
# -*- coding: utf-8 -*- ''' Example beacon to use with salt-proxy .. code-block:: yaml beacons: proxy_example: endpoint: beacon ''' # Import Python libs from __future__ import absolute_import, unicode_literals import logging # Import salt libs import salt.utils.http from salt.ext.six.moves import map # Important: If used with salt-proxy # this is required for the beacon to load!!! __proxyenabled__ = ['*'] __virtualname__ = 'proxy_example' log = logging.getLogger(__name__) def __virtual__(): ''' Trivially let the beacon load for the test example. For a production beacon we should probably have some expression here. ''' return True def validate(config): ''' Validate the beacon configuration ''' if not isinstance(config, list): return False, ('Configuration for proxy_example beacon must be a list.') return True, 'Valid beacon configuration'
saltstack/salt
salt/states/pdbedit.py
absent
python
def absent(name): ''' Ensure user account is absent name : string username ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # remove if needed if name in __salt__['pdbedit.list'](False): res = __salt__['pdbedit.delete'](name) if res[name] in ['deleted']: # check if we need to update changes ret['changes'].update(res) elif res[name] not in ['absent']: # oops something went wrong ret['result'] = False else: ret['comment'] = 'account {login} is absent'.format(login=name) return ret
Ensure user account is absent name : string username
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pdbedit.py#L53-L75
null
# -*- coding: utf-8 -*- ''' Manage accounts in Samba's passdb using pdbedit :maintainer: Jorge Schrauwen <sjorge@blackdot.be> :maturity: new :depends: pdbedit :platform: posix .. versionadded:: 2017.7.0 .. code-block:: yaml wash: pdbedit.absent kaylee: pdbedit.managed: - password: A70C708517B5DD0EDB67714FE25336EB - password_hashed: True - drive: 'X:' - homedir: '\\\\serenity\\mechanic\\profile' ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging # Import Salt libs import salt.utils.data log = logging.getLogger(__name__) # Define the state's virtual name __virtualname__ = 'pdbedit' def __virtual__(): ''' Provides pdbedit when available ''' if 'pdbedit.create' in __salt__: return True else: return ( False, '{0} state module can only be loaded when the pdbedit module is available'.format( __virtualname__ ) ) def managed(name, **kwargs): ''' Manage user account login : string login name password : string password password_hashed : boolean set if password is a nt hash instead of plain text domain : string users domain profile : string profile path script : string logon script drive : string home drive homedir : string home directory fullname : string full name account_desc : string account description machine_sid : string specify the machines new primary group SID or rid user_sid : string specify the users new primary group SID or rid account_control : string specify user account control properties .. 
note:: Only the following can be set: - N: No password required - D: Account disabled - H: Home directory required - L: Automatic Locking - X: Password does not expire reset_login_hours : boolean reset the users allowed logon hours reset_bad_password_count : boolean reset the stored bad login counter ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # save state saved = __salt__['pdbedit.list'](hashes=True) saved = saved[name] if name in saved else {} # call pdbedit.modify kwargs['login'] = name res = __salt__['pdbedit.modify'](**kwargs) # calculate changes if res[name] in ['created']: ret['changes'] = res elif res[name] in ['updated']: ret['changes'][name] = salt.utils.data.compare_dicts( saved, __salt__['pdbedit.list'](hashes=True)[name], ) elif res[name] not in ['unchanged']: ret['result'] = False ret['comment'] = res[name] return ret def present(name, **kwargs): ''' Alias for pdbedit.managed ''' return managed(name, **kwargs) # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
saltstack/salt
salt/states/pdbedit.py
managed
python
def managed(name, **kwargs): ''' Manage user account login : string login name password : string password password_hashed : boolean set if password is a nt hash instead of plain text domain : string users domain profile : string profile path script : string logon script drive : string home drive homedir : string home directory fullname : string full name account_desc : string account description machine_sid : string specify the machines new primary group SID or rid user_sid : string specify the users new primary group SID or rid account_control : string specify user account control properties .. note:: Only the following can be set: - N: No password required - D: Account disabled - H: Home directory required - L: Automatic Locking - X: Password does not expire reset_login_hours : boolean reset the users allowed logon hours reset_bad_password_count : boolean reset the stored bad login counter ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # save state saved = __salt__['pdbedit.list'](hashes=True) saved = saved[name] if name in saved else {} # call pdbedit.modify kwargs['login'] = name res = __salt__['pdbedit.modify'](**kwargs) # calculate changes if res[name] in ['created']: ret['changes'] = res elif res[name] in ['updated']: ret['changes'][name] = salt.utils.data.compare_dicts( saved, __salt__['pdbedit.list'](hashes=True)[name], ) elif res[name] not in ['unchanged']: ret['result'] = False ret['comment'] = res[name] return ret
Manage user account login : string login name password : string password password_hashed : boolean set if password is a nt hash instead of plain text domain : string users domain profile : string profile path script : string logon script drive : string home drive homedir : string home directory fullname : string full name account_desc : string account description machine_sid : string specify the machines new primary group SID or rid user_sid : string specify the users new primary group SID or rid account_control : string specify user account control properties .. note:: Only the following can be set: - N: No password required - D: Account disabled - H: Home directory required - L: Automatic Locking - X: Password does not expire reset_login_hours : boolean reset the users allowed logon hours reset_bad_password_count : boolean reset the stored bad login counter
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pdbedit.py#L78-L146
null
# -*- coding: utf-8 -*- ''' Manage accounts in Samba's passdb using pdbedit :maintainer: Jorge Schrauwen <sjorge@blackdot.be> :maturity: new :depends: pdbedit :platform: posix .. versionadded:: 2017.7.0 .. code-block:: yaml wash: pdbedit.absent kaylee: pdbedit.managed: - password: A70C708517B5DD0EDB67714FE25336EB - password_hashed: True - drive: 'X:' - homedir: '\\\\serenity\\mechanic\\profile' ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import logging # Import Salt libs import salt.utils.data log = logging.getLogger(__name__) # Define the state's virtual name __virtualname__ = 'pdbedit' def __virtual__(): ''' Provides pdbedit when available ''' if 'pdbedit.create' in __salt__: return True else: return ( False, '{0} state module can only be loaded when the pdbedit module is available'.format( __virtualname__ ) ) def absent(name): ''' Ensure user account is absent name : string username ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} # remove if needed if name in __salt__['pdbedit.list'](False): res = __salt__['pdbedit.delete'](name) if res[name] in ['deleted']: # check if we need to update changes ret['changes'].update(res) elif res[name] not in ['absent']: # oops something went wrong ret['result'] = False else: ret['comment'] = 'account {login} is absent'.format(login=name) return ret def present(name, **kwargs): ''' Alias for pdbedit.managed ''' return managed(name, **kwargs) # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
saltstack/salt
salt/modules/salt_proxy.py
_write_proxy_conf
python
def _write_proxy_conf(proxyfile): ''' write to file ''' msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \ .format(proxyfile) log.trace('Salt Proxy Module: write proxy conf') if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) return msg
write to file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_proxy.py#L24-L41
null
# -*- coding: utf-8 -*- ''' Salt proxy module .. versionadded:: 2015.8.3 Module to deploy and manage salt-proxy processes on a minion. ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import os import logging # Import Salt libs import salt.utils.files # Import 3rd-party libs import salt.ext.six.moves log = logging.getLogger(__name__) def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old def _is_proxy_running(proxyname): ''' Check if proxy for this name is running ''' cmd = ('ps ax | grep "salt-proxy --proxyid={0}" | grep -v grep' .format(salt.ext.six.moves.shlex_quote(proxyname))) cmdout = __salt__['cmd.run_all']( cmd, timeout=5, python_shell=True) if not cmdout['stdout']: return False else: return True def _proxy_process(proxyname, test): ''' Check and execute proxy process ''' changes_old = [] changes_new = [] if not _is_proxy_running(proxyname): if not test: __salt__['cmd.run_all']( 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5) changes_new.append('Salt Proxy: Started proxy process for {0}' .format(proxyname)) else: changes_new.append('Salt Proxy: process {0} will be started' .format(proxyname)) else: changes_old.append('Salt Proxy: already running for {0}' .format(proxyname)) return True, changes_new, 
changes_old def configure_proxy(proxyname, start=True): ''' Create the salt proxy file and start the proxy process if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000 ''' changes_new = [] changes_old = [] status_file = True test = __opts__['test'] # write the proxy file if necessary proxyfile = '/etc/salt/proxy' status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) status_proc = False # start the proxy process if start: status_proc, msg_new, msg_old = _proxy_process(proxyname, test) changes_old.extend(msg_old) changes_new.extend(msg_new) else: changes_old.append('Start is False, not starting salt-proxy process') log.debug('Process not started') return { 'result': status_file and status_proc, 'changes': { 'old': '\n'.join(changes_old), 'new': '\n'.join(changes_new), }, } def is_running(proxyname): ''' Check if the salt-proxy process associated with this proxy (name) is running. Returns True if the process is running False otherwise Parameters: proxyname: String name of the proxy (p8000 for example) CLI Example: .. code-block:: bash salt deviceminion salt_proxy.is_running p8000 ''' return {'result': _is_proxy_running(proxyname)}
saltstack/salt
salt/modules/salt_proxy.py
_proxy_conf_file
python
def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old
Check if proxy conf exists and update
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_proxy.py#L44-L70
[ "def _write_proxy_conf(proxyfile):\n '''\n write to file\n '''\n msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \\\n .format(proxyfile)\n\n log.trace('Salt Proxy Module: write proxy conf')\n\n if proxyfile:\n log.debug('Writing proxy conf file')\n with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf:\n proxy_conf.write(salt.utils.stringutils.to_str('master = {0}'\n .format(__grains__['master'])))\n msg = 'Wrote proxy file {0}'.format(proxyfile)\n log.debug(msg)\n\n return msg\n" ]
# -*- coding: utf-8 -*- ''' Salt proxy module .. versionadded:: 2015.8.3 Module to deploy and manage salt-proxy processes on a minion. ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import os import logging # Import Salt libs import salt.utils.files # Import 3rd-party libs import salt.ext.six.moves log = logging.getLogger(__name__) def _write_proxy_conf(proxyfile): ''' write to file ''' msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \ .format(proxyfile) log.trace('Salt Proxy Module: write proxy conf') if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) return msg def _is_proxy_running(proxyname): ''' Check if proxy for this name is running ''' cmd = ('ps ax | grep "salt-proxy --proxyid={0}" | grep -v grep' .format(salt.ext.six.moves.shlex_quote(proxyname))) cmdout = __salt__['cmd.run_all']( cmd, timeout=5, python_shell=True) if not cmdout['stdout']: return False else: return True def _proxy_process(proxyname, test): ''' Check and execute proxy process ''' changes_old = [] changes_new = [] if not _is_proxy_running(proxyname): if not test: __salt__['cmd.run_all']( 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5) changes_new.append('Salt Proxy: Started proxy process for {0}' .format(proxyname)) else: changes_new.append('Salt Proxy: process {0} will be started' .format(proxyname)) else: changes_old.append('Salt Proxy: already running for {0}' .format(proxyname)) return True, changes_new, changes_old def configure_proxy(proxyname, start=True): ''' Create the salt proxy file and start the proxy process if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process 
should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000 ''' changes_new = [] changes_old = [] status_file = True test = __opts__['test'] # write the proxy file if necessary proxyfile = '/etc/salt/proxy' status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) status_proc = False # start the proxy process if start: status_proc, msg_new, msg_old = _proxy_process(proxyname, test) changes_old.extend(msg_old) changes_new.extend(msg_new) else: changes_old.append('Start is False, not starting salt-proxy process') log.debug('Process not started') return { 'result': status_file and status_proc, 'changes': { 'old': '\n'.join(changes_old), 'new': '\n'.join(changes_new), }, } def is_running(proxyname): ''' Check if the salt-proxy process associated with this proxy (name) is running. Returns True if the process is running False otherwise Parameters: proxyname: String name of the proxy (p8000 for example) CLI Example: .. code-block:: bash salt deviceminion salt_proxy.is_running p8000 ''' return {'result': _is_proxy_running(proxyname)}
saltstack/salt
salt/modules/salt_proxy.py
_is_proxy_running
python
def _is_proxy_running(proxyname): ''' Check if proxy for this name is running ''' cmd = ('ps ax | grep "salt-proxy --proxyid={0}" | grep -v grep' .format(salt.ext.six.moves.shlex_quote(proxyname))) cmdout = __salt__['cmd.run_all']( cmd, timeout=5, python_shell=True) if not cmdout['stdout']: return False else: return True
Check if proxy for this name is running
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_proxy.py#L73-L86
null
# -*- coding: utf-8 -*- ''' Salt proxy module .. versionadded:: 2015.8.3 Module to deploy and manage salt-proxy processes on a minion. ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import os import logging # Import Salt libs import salt.utils.files # Import 3rd-party libs import salt.ext.six.moves log = logging.getLogger(__name__) def _write_proxy_conf(proxyfile): ''' write to file ''' msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \ .format(proxyfile) log.trace('Salt Proxy Module: write proxy conf') if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) return msg def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old def _proxy_process(proxyname, test): ''' Check and execute proxy process ''' changes_old = [] changes_new = [] if not _is_proxy_running(proxyname): if not test: __salt__['cmd.run_all']( 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5) changes_new.append('Salt Proxy: Started proxy process for {0}' .format(proxyname)) else: changes_new.append('Salt Proxy: process {0} will 
be started' .format(proxyname)) else: changes_old.append('Salt Proxy: already running for {0}' .format(proxyname)) return True, changes_new, changes_old def configure_proxy(proxyname, start=True): ''' Create the salt proxy file and start the proxy process if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000 ''' changes_new = [] changes_old = [] status_file = True test = __opts__['test'] # write the proxy file if necessary proxyfile = '/etc/salt/proxy' status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) status_proc = False # start the proxy process if start: status_proc, msg_new, msg_old = _proxy_process(proxyname, test) changes_old.extend(msg_old) changes_new.extend(msg_new) else: changes_old.append('Start is False, not starting salt-proxy process') log.debug('Process not started') return { 'result': status_file and status_proc, 'changes': { 'old': '\n'.join(changes_old), 'new': '\n'.join(changes_new), }, } def is_running(proxyname): ''' Check if the salt-proxy process associated with this proxy (name) is running. Returns True if the process is running False otherwise Parameters: proxyname: String name of the proxy (p8000 for example) CLI Example: .. code-block:: bash salt deviceminion salt_proxy.is_running p8000 ''' return {'result': _is_proxy_running(proxyname)}
saltstack/salt
salt/modules/salt_proxy.py
_proxy_process
python
def _proxy_process(proxyname, test): ''' Check and execute proxy process ''' changes_old = [] changes_new = [] if not _is_proxy_running(proxyname): if not test: __salt__['cmd.run_all']( 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5) changes_new.append('Salt Proxy: Started proxy process for {0}' .format(proxyname)) else: changes_new.append('Salt Proxy: process {0} will be started' .format(proxyname)) else: changes_old.append('Salt Proxy: already running for {0}' .format(proxyname)) return True, changes_new, changes_old
Check and execute proxy process
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_proxy.py#L89-L108
[ "def _is_proxy_running(proxyname):\n '''\n Check if proxy for this name is running\n '''\n cmd = ('ps ax | grep \"salt-proxy --proxyid={0}\" | grep -v grep'\n .format(salt.ext.six.moves.shlex_quote(proxyname)))\n cmdout = __salt__['cmd.run_all'](\n cmd,\n timeout=5,\n python_shell=True)\n if not cmdout['stdout']:\n return False\n else:\n return True\n" ]
# -*- coding: utf-8 -*- ''' Salt proxy module .. versionadded:: 2015.8.3 Module to deploy and manage salt-proxy processes on a minion. ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import os import logging # Import Salt libs import salt.utils.files # Import 3rd-party libs import salt.ext.six.moves log = logging.getLogger(__name__) def _write_proxy_conf(proxyfile): ''' write to file ''' msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \ .format(proxyfile) log.trace('Salt Proxy Module: write proxy conf') if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) return msg def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old def _is_proxy_running(proxyname): ''' Check if proxy for this name is running ''' cmd = ('ps ax | grep "salt-proxy --proxyid={0}" | grep -v grep' .format(salt.ext.six.moves.shlex_quote(proxyname))) cmdout = __salt__['cmd.run_all']( cmd, timeout=5, python_shell=True) if not cmdout['stdout']: return False else: return True def configure_proxy(proxyname, start=True): ''' Create the salt proxy file and start the proxy process 
if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000 ''' changes_new = [] changes_old = [] status_file = True test = __opts__['test'] # write the proxy file if necessary proxyfile = '/etc/salt/proxy' status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) status_proc = False # start the proxy process if start: status_proc, msg_new, msg_old = _proxy_process(proxyname, test) changes_old.extend(msg_old) changes_new.extend(msg_new) else: changes_old.append('Start is False, not starting salt-proxy process') log.debug('Process not started') return { 'result': status_file and status_proc, 'changes': { 'old': '\n'.join(changes_old), 'new': '\n'.join(changes_new), }, } def is_running(proxyname): ''' Check if the salt-proxy process associated with this proxy (name) is running. Returns True if the process is running False otherwise Parameters: proxyname: String name of the proxy (p8000 for example) CLI Example: .. code-block:: bash salt deviceminion salt_proxy.is_running p8000 ''' return {'result': _is_proxy_running(proxyname)}
saltstack/salt
salt/modules/salt_proxy.py
configure_proxy
python
def configure_proxy(proxyname, start=True): ''' Create the salt proxy file and start the proxy process if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000 ''' changes_new = [] changes_old = [] status_file = True test = __opts__['test'] # write the proxy file if necessary proxyfile = '/etc/salt/proxy' status_file, msg_new, msg_old = _proxy_conf_file(proxyfile, test) changes_new.extend(msg_new) changes_old.extend(msg_old) status_proc = False # start the proxy process if start: status_proc, msg_new, msg_old = _proxy_process(proxyname, test) changes_old.extend(msg_old) changes_new.extend(msg_new) else: changes_old.append('Start is False, not starting salt-proxy process') log.debug('Process not started') return { 'result': status_file and status_proc, 'changes': { 'old': '\n'.join(changes_old), 'new': '\n'.join(changes_new), }, }
Create the salt proxy file and start the proxy process if required Parameters: proxyname: Name to be used for this proxy (should match entries in pillar) start: Boolean indicating if the process should be started default = True CLI Example: .. code-block:: bash salt deviceminion salt_proxy.configure_proxy p8000
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/salt_proxy.py#L111-L156
[ "def _proxy_conf_file(proxyfile, test):\n '''\n Check if proxy conf exists and update\n '''\n changes_old = []\n changes_new = []\n success = True\n if not os.path.exists(proxyfile):\n try:\n if not test:\n changes_new.append(_write_proxy_conf(proxyfile))\n msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile)\n else:\n msg = 'Salt Proxy: Update required to proxy conf {0}' \\\n .format(proxyfile)\n except (OSError, IOError) as err:\n success = False\n msg = 'Salt Proxy: Error writing proxy file {0}'.format(err)\n log.error(msg)\n changes_new.append(msg)\n changes_new.append(msg)\n log.debug(msg)\n else:\n msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile)\n changes_old.append(msg)\n log.debug(msg)\n return success, changes_new, changes_old\n", "def _proxy_process(proxyname, test):\n '''\n Check and execute proxy process\n '''\n changes_old = []\n changes_new = []\n if not _is_proxy_running(proxyname):\n if not test:\n __salt__['cmd.run_all'](\n 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)),\n timeout=5)\n changes_new.append('Salt Proxy: Started proxy process for {0}'\n .format(proxyname))\n else:\n changes_new.append('Salt Proxy: process {0} will be started'\n .format(proxyname))\n else:\n changes_old.append('Salt Proxy: already running for {0}'\n .format(proxyname))\n return True, changes_new, changes_old\n" ]
# -*- coding: utf-8 -*- ''' Salt proxy module .. versionadded:: 2015.8.3 Module to deploy and manage salt-proxy processes on a minion. ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function import os import logging # Import Salt libs import salt.utils.files # Import 3rd-party libs import salt.ext.six.moves log = logging.getLogger(__name__) def _write_proxy_conf(proxyfile): ''' write to file ''' msg = 'Invalid value for proxy file provided!, Supplied value = {0}' \ .format(proxyfile) log.trace('Salt Proxy Module: write proxy conf') if proxyfile: log.debug('Writing proxy conf file') with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf: proxy_conf.write(salt.utils.stringutils.to_str('master = {0}' .format(__grains__['master']))) msg = 'Wrote proxy file {0}'.format(proxyfile) log.debug(msg) return msg def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old def _is_proxy_running(proxyname): ''' Check if proxy for this name is running ''' cmd = ('ps ax | grep "salt-proxy --proxyid={0}" | grep -v grep' .format(salt.ext.six.moves.shlex_quote(proxyname))) cmdout = __salt__['cmd.run_all']( cmd, timeout=5, python_shell=True) if not cmdout['stdout']: return False else: return True def _proxy_process(proxyname, test): ''' Check and execute proxy process ''' changes_old = [] 
changes_new = [] if not _is_proxy_running(proxyname): if not test: __salt__['cmd.run_all']( 'salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5) changes_new.append('Salt Proxy: Started proxy process for {0}' .format(proxyname)) else: changes_new.append('Salt Proxy: process {0} will be started' .format(proxyname)) else: changes_old.append('Salt Proxy: already running for {0}' .format(proxyname)) return True, changes_new, changes_old def is_running(proxyname): ''' Check if the salt-proxy process associated with this proxy (name) is running. Returns True if the process is running False otherwise Parameters: proxyname: String name of the proxy (p8000 for example) CLI Example: .. code-block:: bash salt deviceminion salt_proxy.is_running p8000 ''' return {'result': _is_proxy_running(proxyname)}
saltstack/salt
salt/netapi/rest_tornado/event_processor.py
SaltInfo.publish_minions
python
def publish_minions(self): ''' Publishes minions as a list of dicts. ''' log.debug('in publish minions') minions = {} log.debug('starting loop') for minion, minion_info in six.iteritems(self.minions): log.debug(minion) # log.debug(minion_info) curr_minion = {} curr_minion.update(minion_info) curr_minion.update({'id': minion}) minions[minion] = curr_minion log.debug('ended loop') ret = {'minions': minions} self.handler.write_message( salt.utils.json.dumps(ret) + str('\n\n'))
Publishes minions as a list of dicts.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/event_processor.py#L32-L50
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
class SaltInfo(object): ''' Class to handle processing and publishing of "real time" Salt upates. ''' def __init__(self, handler): ''' handler is expected to be the server side end of a websocket connection. ''' self.handler = handler # These represent a "real time" view into Salt's jobs. self.jobs = {} # This represents a "real time" view of minions connected to # Salt. self.minions = {} # future lint: disable=blacklisted-function def publish(self, key, data): ''' Publishes the data to the event stream. ''' publish_data = {key: data} pub = salt.utils.json.dumps(publish_data) + str('\n\n') # future lint: disable=blacklisted-function self.handler.write_message(pub) def process_minion_update(self, event_data): ''' Associate grains data with a minion and publish minion update ''' tag = event_data['tag'] event_info = event_data['data'] mid = tag.split('/')[-1] if not self.minions.get(mid, None): self.minions[mid] = {} minion = self.minions[mid] minion.update({'grains': event_info['return']}) log.debug("In process minion grains update with minions=%s", self.minions) self.publish_minions() def process_ret_job_event(self, event_data): ''' Process a /ret event returned by Salt for a particular minion. These events contain the returned results from a particular execution. ''' tag = event_data['tag'] event_info = event_data['data'] _, _, jid, _, mid = tag.split('/') job = self.jobs.setdefault(jid, {}) minion = job.setdefault('minions', {}).setdefault(mid, {}) minion.update({'return': event_info['return']}) minion.update({'retcode': event_info['retcode']}) minion.update({'success': event_info['success']}) job_complete = all([minion['success'] for mid, minion in six.iteritems(job['minions'])]) if job_complete: job['state'] = 'complete' self.publish('jobs', self.jobs) def process_new_job_event(self, event_data): ''' Creates a new job with properties from the event data like jid, function, args, timestamp. Also sets the initial state to started. 
Minions that are participating in this job are also noted. ''' job = None tag = event_data['tag'] event_info = event_data['data'] minions = {} for mid in event_info['minions']: minions[mid] = {'success': False} job = { 'jid': event_info['jid'], 'start_time': event_info['_stamp'], 'minions': minions, # is a dictionary keyed by mids 'fun': event_info['fun'], 'tgt': event_info['tgt'], 'tgt_type': event_info['tgt_type'], 'state': 'running', } self.jobs[event_info['jid']] = job self.publish('jobs', self.jobs) def process_key_event(self, event_data): ''' Tag: salt/key Data: {'_stamp': '2014-05-20T22:45:04.345583', 'act': 'delete', 'id': 'compute.home', 'result': True} ''' tag = event_data['tag'] event_info = event_data['data'] if event_info['act'] == 'delete': self.minions.pop(event_info['id'], None) elif event_info['act'] == 'accept': self.minions.setdefault(event_info['id'], {}) self.publish_minions() def process_presence_events(self, salt_data, token, opts): ''' Check if any minions have connected or dropped. Send a message to the client if they have. 
''' log.debug('In presence') changed = False # check if any connections were dropped if set(salt_data['data'].get('lost', [])): dropped_minions = set(salt_data['data'].get('lost', [])) else: dropped_minions = set(self.minions) - set(salt_data['data'].get('present', [])) for minion in dropped_minions: changed = True log.debug('Popping %s', minion) self.minions.pop(minion, None) # check if any new connections were made if set(salt_data['data'].get('new', [])): log.debug('got new minions') new_minions = set(salt_data['data'].get('new', [])) changed = True elif set(salt_data['data'].get('present', [])) - set(self.minions): log.debug('detected new minions') new_minions = set(salt_data['data'].get('present', [])) - set(self.minions) changed = True else: new_minions = [] tgt = ','.join(new_minions) for mid in new_minions: log.debug('Adding minion') self.minions[mid] = {} if tgt: changed = True client = salt.netapi.NetapiClient(opts) client.run( { 'fun': 'grains.items', 'tgt': tgt, 'expr_type': 'list', 'mode': 'client', 'client': 'local', 'asynchronous': 'local_async', 'token': token, }) if changed: self.publish_minions() def process(self, salt_data, token, opts): ''' Process events and publish data ''' log.debug('In process %s', threading.current_thread()) log.debug(salt_data['tag']) log.debug(salt_data) parts = salt_data['tag'].split('/') if len(parts) < 2: return # TBD: Simplify these conditional expressions if parts[1] == 'job': log.debug('In job part 1') if parts[3] == 'new': log.debug('In new job') self.process_new_job_event(salt_data) # if salt_data['data']['fun'] == 'grains.items': # self.minions = {} elif parts[3] == 'ret': log.debug('In ret') self.process_ret_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.process_minion_update(salt_data) elif parts[1] == 'key': log.debug('In key') self.process_key_event(salt_data) elif parts[1] == 'presence': self.process_presence_events(salt_data, token, opts)
saltstack/salt
salt/netapi/rest_tornado/event_processor.py
SaltInfo.publish
python
def publish(self, key, data): ''' Publishes the data to the event stream. ''' publish_data = {key: data} pub = salt.utils.json.dumps(publish_data) + str('\n\n') # future lint: disable=blacklisted-function self.handler.write_message(pub)
Publishes the data to the event stream.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/rest_tornado/event_processor.py#L52-L58
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
class SaltInfo(object): ''' Class to handle processing and publishing of "real time" Salt upates. ''' def __init__(self, handler): ''' handler is expected to be the server side end of a websocket connection. ''' self.handler = handler # These represent a "real time" view into Salt's jobs. self.jobs = {} # This represents a "real time" view of minions connected to # Salt. self.minions = {} def publish_minions(self): ''' Publishes minions as a list of dicts. ''' log.debug('in publish minions') minions = {} log.debug('starting loop') for minion, minion_info in six.iteritems(self.minions): log.debug(minion) # log.debug(minion_info) curr_minion = {} curr_minion.update(minion_info) curr_minion.update({'id': minion}) minions[minion] = curr_minion log.debug('ended loop') ret = {'minions': minions} self.handler.write_message( salt.utils.json.dumps(ret) + str('\n\n')) # future lint: disable=blacklisted-function def process_minion_update(self, event_data): ''' Associate grains data with a minion and publish minion update ''' tag = event_data['tag'] event_info = event_data['data'] mid = tag.split('/')[-1] if not self.minions.get(mid, None): self.minions[mid] = {} minion = self.minions[mid] minion.update({'grains': event_info['return']}) log.debug("In process minion grains update with minions=%s", self.minions) self.publish_minions() def process_ret_job_event(self, event_data): ''' Process a /ret event returned by Salt for a particular minion. These events contain the returned results from a particular execution. 
''' tag = event_data['tag'] event_info = event_data['data'] _, _, jid, _, mid = tag.split('/') job = self.jobs.setdefault(jid, {}) minion = job.setdefault('minions', {}).setdefault(mid, {}) minion.update({'return': event_info['return']}) minion.update({'retcode': event_info['retcode']}) minion.update({'success': event_info['success']}) job_complete = all([minion['success'] for mid, minion in six.iteritems(job['minions'])]) if job_complete: job['state'] = 'complete' self.publish('jobs', self.jobs) def process_new_job_event(self, event_data): ''' Creates a new job with properties from the event data like jid, function, args, timestamp. Also sets the initial state to started. Minions that are participating in this job are also noted. ''' job = None tag = event_data['tag'] event_info = event_data['data'] minions = {} for mid in event_info['minions']: minions[mid] = {'success': False} job = { 'jid': event_info['jid'], 'start_time': event_info['_stamp'], 'minions': minions, # is a dictionary keyed by mids 'fun': event_info['fun'], 'tgt': event_info['tgt'], 'tgt_type': event_info['tgt_type'], 'state': 'running', } self.jobs[event_info['jid']] = job self.publish('jobs', self.jobs) def process_key_event(self, event_data): ''' Tag: salt/key Data: {'_stamp': '2014-05-20T22:45:04.345583', 'act': 'delete', 'id': 'compute.home', 'result': True} ''' tag = event_data['tag'] event_info = event_data['data'] if event_info['act'] == 'delete': self.minions.pop(event_info['id'], None) elif event_info['act'] == 'accept': self.minions.setdefault(event_info['id'], {}) self.publish_minions() def process_presence_events(self, salt_data, token, opts): ''' Check if any minions have connected or dropped. Send a message to the client if they have. 
''' log.debug('In presence') changed = False # check if any connections were dropped if set(salt_data['data'].get('lost', [])): dropped_minions = set(salt_data['data'].get('lost', [])) else: dropped_minions = set(self.minions) - set(salt_data['data'].get('present', [])) for minion in dropped_minions: changed = True log.debug('Popping %s', minion) self.minions.pop(minion, None) # check if any new connections were made if set(salt_data['data'].get('new', [])): log.debug('got new minions') new_minions = set(salt_data['data'].get('new', [])) changed = True elif set(salt_data['data'].get('present', [])) - set(self.minions): log.debug('detected new minions') new_minions = set(salt_data['data'].get('present', [])) - set(self.minions) changed = True else: new_minions = [] tgt = ','.join(new_minions) for mid in new_minions: log.debug('Adding minion') self.minions[mid] = {} if tgt: changed = True client = salt.netapi.NetapiClient(opts) client.run( { 'fun': 'grains.items', 'tgt': tgt, 'expr_type': 'list', 'mode': 'client', 'client': 'local', 'asynchronous': 'local_async', 'token': token, }) if changed: self.publish_minions() def process(self, salt_data, token, opts): ''' Process events and publish data ''' log.debug('In process %s', threading.current_thread()) log.debug(salt_data['tag']) log.debug(salt_data) parts = salt_data['tag'].split('/') if len(parts) < 2: return # TBD: Simplify these conditional expressions if parts[1] == 'job': log.debug('In job part 1') if parts[3] == 'new': log.debug('In new job') self.process_new_job_event(salt_data) # if salt_data['data']['fun'] == 'grains.items': # self.minions = {} elif parts[3] == 'ret': log.debug('In ret') self.process_ret_job_event(salt_data) if salt_data['data']['fun'] == 'grains.items': self.process_minion_update(salt_data) elif parts[1] == 'key': log.debug('In key') self.process_key_event(salt_data) elif parts[1] == 'presence': self.process_presence_events(salt_data, token, opts)