repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/proxy/philips_hue.py
call_brightness
python
def call_brightness(*args, **kwargs):
    '''
    Set the brightness of the lamp.

    Arguments:

    * **value**: 1~244 brightness of the lamp (values outside this
      range are clamped).

    Options:

    * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.
    * **transition**: Transition 0~200. Default 0.

    CLI Example:

    .. code-block:: bash

        salt '*' hue.brightness value=100
        salt '*' hue.brightness id=1 value=150
        salt '*' hue.brightness id=1,2,3 value=255
    '''
    res = dict()

    if 'value' not in kwargs:
        raise CommandExecutionError("Parameter 'value' is missing")

    try:
        # Clamp to 1~244. NOTE(review): the Hue API documents "bri" as
        # 1~254; the 244 upper bound here looks like a typo but is kept
        # to preserve existing behavior — confirm upstream.
        brightness = max(min(int(kwargs['value']), 244), 1)
    except Exception:
        raise CommandExecutionError("Parameter 'value' does not contain an integer")

    try:
        # 'transition' is optional; any missing/invalid value falls back to 0.
        transition = max(min(int(kwargs['transition']), 200), 0)
    except Exception:
        transition = 0

    devices = _get_lights()
    # Explicit conditional instead of 'cond and a or b': the and/or form
    # wrongly fell through to _get_devices() (raising "Parameter ID is
    # required") when the bridge reported zero lamps and no id was given.
    if 'id' in kwargs:
        targets = _get_devices(kwargs)
    else:
        targets = sorted(devices)
    for dev_id in targets:
        res[dev_id] = _set(dev_id, {"bri": brightness, "transitiontime": transition})

    return res
Set an effect to the lamp. Arguments: * **value**: 0~255 brightness of the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **transition**: Transition 0~200. Default 0. CLI Example: .. code-block:: bash salt '*' hue.brightness value=100 salt '*' hue.brightness id=1 value=150 salt '*' hue.brightness id=1,2,3 value=255
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/philips_hue.py#L457-L497
[ "def _set(lamp_id, state, method=\"state\"):\n '''\n Set state to the device by ID.\n\n :param lamp_id:\n :param state:\n :return:\n '''\n try:\n res = _query(lamp_id, state, action=method, method='PUT')\n except Exception as err:\n raise CommandExecutionError(err)\n\n res = len(res) > 1 and res[-1] or res[0]\n if res.get('success'):\n res = {'result': True}\n elif res.get('error'):\n res = {'result': False,\n 'description': res['error']['description'],\n 'type': res['error']['type']}\n\n return res\n", "def _get_devices(params):\n '''\n Parse device(s) ID(s) from the common params.\n\n :param params:\n :return:\n '''\n if 'id' not in params:\n raise CommandExecutionError(\"Parameter ID is required.\")\n\n return type(params['id']) == int and [params['id']] \\\n or [int(dev) for dev in params['id'].split(\",\")]\n", "def _get_lights():\n '''\n Get all available lighting devices.\n '''\n return _query(None, None)\n" ]
# -*- coding: utf-8 -*- # # Copyright 2015 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Philips HUE lamps module for proxy. .. versionadded:: 2015.8.3 First create a new user on the Hue bridge by following the `Meet hue <https://www.developers.meethue.com/documentation/getting-started>`_ instructions. To configure the proxy minion: .. code-block:: yaml proxy: proxytype: philips_hue host: [hostname or ip] user: [username] ''' # pylint: disable=import-error,no-name-in-module,redefined-builtin from __future__ import absolute_import, print_function, unicode_literals import salt.ext.six.moves.http_client as http_client # Import python libs import logging import time import salt.utils.json from salt.exceptions import (CommandExecutionError, MinionError) from salt.ext import six __proxyenabled__ = ['philips_hue'] CONFIG = {} log = logging.getLogger(__file__) class Const(object): ''' Constants for the lamp operations. ''' LAMP_ON = {"on": True, "transitiontime": 0} LAMP_OFF = {"on": False, "transitiontime": 0} COLOR_WHITE = {"xy": [0.3227, 0.329]} COLOR_DAYLIGHT = {"xy": [0.3806, 0.3576]} COLOR_RED = {"hue": 0, "sat": 254} COLOR_GREEN = {"hue": 25500, "sat": 254} COLOR_ORANGE = {"hue": 12000, "sat": 254} COLOR_PINK = {"xy": [0.3688, 0.2095]} COLOR_BLUE = {"hue": 46920, "sat": 254} COLOR_YELLOW = {"xy": [0.4432, 0.5154]} COLOR_PURPLE = {"xy": [0.3787, 0.1724]} def __virtual__(): ''' Validate the module. ''' return True def init(cnf): ''' Initialize the module. 
''' CONFIG['host'] = cnf.get('proxy', {}).get('host') if not CONFIG['host']: raise MinionError(message="Cannot find 'host' parameter in the proxy configuration") CONFIG['user'] = cnf.get('proxy', {}).get('user') if not CONFIG['user']: raise MinionError(message="Cannot find 'user' parameter in the proxy configuration") CONFIG['uri'] = "/api/{0}".format(CONFIG['user']) def ping(*args, **kw): ''' Ping the lamps. ''' # Here blink them return True def shutdown(opts, *args, **kw): ''' Shuts down the service. ''' # This is no-op method, which is required but makes nothing at this point. return True def _query(lamp_id, state, action='', method='GET'): ''' Query the URI :return: ''' # Because salt.utils.query is that dreadful... :( err = None url = "{0}/lights{1}".format(CONFIG['uri'], lamp_id and '/{0}'.format(lamp_id) or '') \ + (action and "/{0}".format(action) or '') conn = http_client.HTTPConnection(CONFIG['host']) if method == 'PUT': conn.request(method, url, salt.utils.json.dumps(state)) else: conn.request(method, url) resp = conn.getresponse() if resp.status == http_client.OK: res = salt.utils.json.loads(resp.read()) else: err = "HTTP error: {0}, {1}".format(resp.status, resp.reason) conn.close() if err: raise CommandExecutionError(err) return res def _set(lamp_id, state, method="state"): ''' Set state to the device by ID. :param lamp_id: :param state: :return: ''' try: res = _query(lamp_id, state, action=method, method='PUT') except Exception as err: raise CommandExecutionError(err) res = len(res) > 1 and res[-1] or res[0] if res.get('success'): res = {'result': True} elif res.get('error'): res = {'result': False, 'description': res['error']['description'], 'type': res['error']['type']} return res def _get_devices(params): ''' Parse device(s) ID(s) from the common params. 
:param params: :return: ''' if 'id' not in params: raise CommandExecutionError("Parameter ID is required.") return type(params['id']) == int and [params['id']] \ or [int(dev) for dev in params['id'].split(",")] def _get_lights(): ''' Get all available lighting devices. ''' return _query(None, None) # Callers def call_lights(*args, **kwargs): ''' Get info about all available lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.lights salt '*' hue.lights id=1 salt '*' hue.lights id=1,2,3 ''' res = dict() lights = _get_lights() for dev_id in 'id' in kwargs and _get_devices(kwargs) or sorted(lights.keys()): if lights.get(six.text_type(dev_id)): res[dev_id] = lights[six.text_type(dev_id)] return res or False def call_switch(*args, **kwargs): ''' Switch lamp ON/OFF. If no particular state is passed, then lamp will be switched to the opposite state. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **on**: True or False. Inverted current, if omitted CLI Example: .. code-block:: bash salt '*' hue.switch salt '*' hue.switch id=1 salt '*' hue.switch id=1,2,3 on=True ''' out = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): if 'on' in kwargs: state = kwargs['on'] and Const.LAMP_ON or Const.LAMP_OFF else: # Invert the current state state = devices[six.text_type(dev_id)]['state']['on'] and Const.LAMP_OFF or Const.LAMP_ON out[dev_id] = _set(dev_id, state) return out def call_blink(*args, **kwargs): ''' Blink a lamp. If lamp is ON, then blink ON-OFF-ON, otherwise OFF-ON-OFF. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **pause**: Time in seconds. Can be less than 1, i.e. 0.7, 0.5 sec. CLI Example: .. 
code-block:: bash salt '*' hue.blink id=1 salt '*' hue.blink id=1,2,3 ''' devices = _get_lights() pause = kwargs.get('pause', 0) res = dict() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): state = devices[six.text_type(dev_id)]['state']['on'] _set(dev_id, state and Const.LAMP_OFF or Const.LAMP_ON) if pause: time.sleep(pause) res[dev_id] = _set(dev_id, not state and Const.LAMP_OFF or Const.LAMP_ON) return res def call_ping(*args, **kwargs): ''' Ping the lamps by issuing a short inversion blink to all available devices. CLI Example: .. code-block:: bash salt '*' hue.ping ''' errors = dict() for dev_id, dev_status in call_blink().items(): if not dev_status['result']: errors[dev_id] = False return errors or True def call_status(*args, **kwargs): ''' Return the status of the lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.status salt '*' hue.status id=1 salt '*' hue.status id=1,2,3 ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): dev_id = six.text_type(dev_id) res[dev_id] = { 'on': devices[dev_id]['state']['on'], 'reachable': devices[dev_id]['state']['reachable'] } return res def call_rename(*args, **kwargs): ''' Rename a device. Options: * **id**: Specifies a device ID. Only one device at a time. * **title**: Title of the device. CLI Example: .. code-block:: bash salt '*' hue.rename id=1 title='WC for cats' ''' dev_id = _get_devices(kwargs) if len(dev_id) > 1: raise CommandExecutionError("Only one device can be renamed at a time") if 'title' not in kwargs: raise CommandExecutionError("Title is missing") return _set(dev_id[0], {"name": kwargs['title']}, method="") def call_alert(*args, **kwargs): ''' Lamp alert Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **on**: Turns on or off an alert. Default is True. 
CLI Example: .. code-block:: bash salt '*' hue.alert salt '*' hue.alert id=1 salt '*' hue.alert id=1,2,3 on=false ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"alert": kwargs.get("on", True) and "lselect" or "none"}) return res def call_effect(*args, **kwargs): ''' Set an effect to the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **type**: Type of the effect. Possible values are "none" or "colorloop". Default "none". CLI Example: .. code-block:: bash salt '*' hue.effect salt '*' hue.effect id=1 salt '*' hue.effect id=1,2,3 type=colorloop ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"effect": kwargs.get("type", "none")}) return res def call_color(*args, **kwargs): ''' Set a color to the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **color**: Fixed color. Values are: red, green, blue, orange, pink, white, yellow, daylight, purple. Default white. * **transition**: Transition 0~200. Advanced: * **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation. More: http://www.developers.meethue.com/documentation/hue-xy-values CLI Example: .. 
code-block:: bash salt '*' hue.color salt '*' hue.color id=1 salt '*' hue.color id=1,2,3 oolor=red transition=30 salt '*' hue.color id=1 gamut=0.3,0.5 ''' res = dict() colormap = { 'red': Const.COLOR_RED, 'green': Const.COLOR_GREEN, 'blue': Const.COLOR_BLUE, 'orange': Const.COLOR_ORANGE, 'pink': Const.COLOR_PINK, 'white': Const.COLOR_WHITE, 'yellow': Const.COLOR_YELLOW, 'daylight': Const.COLOR_DAYLIGHT, 'purple': Const.COLOR_PURPLE, } devices = _get_lights() color = kwargs.get("gamut") if color: color = color.split(",") if len(color) == 2: try: color = {"xy": [float(color[0]), float(color[1])]} except Exception as ex: color = None else: color = None if not color: color = colormap.get(kwargs.get("color", 'white'), Const.COLOR_WHITE) color.update({"transitiontime": max(min(kwargs.get("transition", 0), 200), 0)}) for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, color) return res def call_temperature(*args, **kwargs): ''' Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired Arguments: * **value**: 150~500. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.temperature value=150 salt '*' hue.temperature value=150 id=1 salt '*' hue.temperature value=150 id=1,2,3 ''' res = dict() if 'value' not in kwargs: raise CommandExecutionError("Parameter 'value' (150~500) is missing") try: value = max(min(int(kwargs['value']), 500), 150) except Exception as err: raise CommandExecutionError("Parameter 'value' does not contains an integer") devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"ct": value}) return res
saltstack/salt
salt/proxy/philips_hue.py
call_temperature
python
def call_temperature(*args, **kwargs):
    '''
    Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired

    Arguments:

    * **value**: 150~500 (values outside this range are clamped).

    Options:

    * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.

    CLI Example:

    .. code-block:: bash

        salt '*' hue.temperature value=150
        salt '*' hue.temperature value=150 id=1
        salt '*' hue.temperature value=150 id=1,2,3
    '''
    res = dict()

    if 'value' not in kwargs:
        raise CommandExecutionError("Parameter 'value' (150~500) is missing")

    try:
        # Clamp the mired temperature to the supported 150~500 range.
        value = max(min(int(kwargs['value']), 500), 150)
    except Exception:
        raise CommandExecutionError("Parameter 'value' does not contain an integer")

    devices = _get_lights()
    # Explicit conditional instead of 'cond and a or b': the and/or form
    # wrongly fell through to _get_devices() (raising "Parameter ID is
    # required") when the bridge reported zero lamps and no id was given.
    if 'id' in kwargs:
        targets = _get_devices(kwargs)
    else:
        targets = sorted(devices)
    for dev_id in targets:
        res[dev_id] = _set(dev_id, {"ct": value})

    return res
Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired Arguments: * **value**: 150~500. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.temperature value=150 salt '*' hue.temperature value=150 id=1 salt '*' hue.temperature value=150 id=1,2,3
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/philips_hue.py#L500-L533
[ "def _set(lamp_id, state, method=\"state\"):\n '''\n Set state to the device by ID.\n\n :param lamp_id:\n :param state:\n :return:\n '''\n try:\n res = _query(lamp_id, state, action=method, method='PUT')\n except Exception as err:\n raise CommandExecutionError(err)\n\n res = len(res) > 1 and res[-1] or res[0]\n if res.get('success'):\n res = {'result': True}\n elif res.get('error'):\n res = {'result': False,\n 'description': res['error']['description'],\n 'type': res['error']['type']}\n\n return res\n", "def _get_devices(params):\n '''\n Parse device(s) ID(s) from the common params.\n\n :param params:\n :return:\n '''\n if 'id' not in params:\n raise CommandExecutionError(\"Parameter ID is required.\")\n\n return type(params['id']) == int and [params['id']] \\\n or [int(dev) for dev in params['id'].split(\",\")]\n", "def _get_lights():\n '''\n Get all available lighting devices.\n '''\n return _query(None, None)\n" ]
# -*- coding: utf-8 -*- # # Copyright 2015 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Philips HUE lamps module for proxy. .. versionadded:: 2015.8.3 First create a new user on the Hue bridge by following the `Meet hue <https://www.developers.meethue.com/documentation/getting-started>`_ instructions. To configure the proxy minion: .. code-block:: yaml proxy: proxytype: philips_hue host: [hostname or ip] user: [username] ''' # pylint: disable=import-error,no-name-in-module,redefined-builtin from __future__ import absolute_import, print_function, unicode_literals import salt.ext.six.moves.http_client as http_client # Import python libs import logging import time import salt.utils.json from salt.exceptions import (CommandExecutionError, MinionError) from salt.ext import six __proxyenabled__ = ['philips_hue'] CONFIG = {} log = logging.getLogger(__file__) class Const(object): ''' Constants for the lamp operations. ''' LAMP_ON = {"on": True, "transitiontime": 0} LAMP_OFF = {"on": False, "transitiontime": 0} COLOR_WHITE = {"xy": [0.3227, 0.329]} COLOR_DAYLIGHT = {"xy": [0.3806, 0.3576]} COLOR_RED = {"hue": 0, "sat": 254} COLOR_GREEN = {"hue": 25500, "sat": 254} COLOR_ORANGE = {"hue": 12000, "sat": 254} COLOR_PINK = {"xy": [0.3688, 0.2095]} COLOR_BLUE = {"hue": 46920, "sat": 254} COLOR_YELLOW = {"xy": [0.4432, 0.5154]} COLOR_PURPLE = {"xy": [0.3787, 0.1724]} def __virtual__(): ''' Validate the module. ''' return True def init(cnf): ''' Initialize the module. 
''' CONFIG['host'] = cnf.get('proxy', {}).get('host') if not CONFIG['host']: raise MinionError(message="Cannot find 'host' parameter in the proxy configuration") CONFIG['user'] = cnf.get('proxy', {}).get('user') if not CONFIG['user']: raise MinionError(message="Cannot find 'user' parameter in the proxy configuration") CONFIG['uri'] = "/api/{0}".format(CONFIG['user']) def ping(*args, **kw): ''' Ping the lamps. ''' # Here blink them return True def shutdown(opts, *args, **kw): ''' Shuts down the service. ''' # This is no-op method, which is required but makes nothing at this point. return True def _query(lamp_id, state, action='', method='GET'): ''' Query the URI :return: ''' # Because salt.utils.query is that dreadful... :( err = None url = "{0}/lights{1}".format(CONFIG['uri'], lamp_id and '/{0}'.format(lamp_id) or '') \ + (action and "/{0}".format(action) or '') conn = http_client.HTTPConnection(CONFIG['host']) if method == 'PUT': conn.request(method, url, salt.utils.json.dumps(state)) else: conn.request(method, url) resp = conn.getresponse() if resp.status == http_client.OK: res = salt.utils.json.loads(resp.read()) else: err = "HTTP error: {0}, {1}".format(resp.status, resp.reason) conn.close() if err: raise CommandExecutionError(err) return res def _set(lamp_id, state, method="state"): ''' Set state to the device by ID. :param lamp_id: :param state: :return: ''' try: res = _query(lamp_id, state, action=method, method='PUT') except Exception as err: raise CommandExecutionError(err) res = len(res) > 1 and res[-1] or res[0] if res.get('success'): res = {'result': True} elif res.get('error'): res = {'result': False, 'description': res['error']['description'], 'type': res['error']['type']} return res def _get_devices(params): ''' Parse device(s) ID(s) from the common params. 
:param params: :return: ''' if 'id' not in params: raise CommandExecutionError("Parameter ID is required.") return type(params['id']) == int and [params['id']] \ or [int(dev) for dev in params['id'].split(",")] def _get_lights(): ''' Get all available lighting devices. ''' return _query(None, None) # Callers def call_lights(*args, **kwargs): ''' Get info about all available lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.lights salt '*' hue.lights id=1 salt '*' hue.lights id=1,2,3 ''' res = dict() lights = _get_lights() for dev_id in 'id' in kwargs and _get_devices(kwargs) or sorted(lights.keys()): if lights.get(six.text_type(dev_id)): res[dev_id] = lights[six.text_type(dev_id)] return res or False def call_switch(*args, **kwargs): ''' Switch lamp ON/OFF. If no particular state is passed, then lamp will be switched to the opposite state. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **on**: True or False. Inverted current, if omitted CLI Example: .. code-block:: bash salt '*' hue.switch salt '*' hue.switch id=1 salt '*' hue.switch id=1,2,3 on=True ''' out = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): if 'on' in kwargs: state = kwargs['on'] and Const.LAMP_ON or Const.LAMP_OFF else: # Invert the current state state = devices[six.text_type(dev_id)]['state']['on'] and Const.LAMP_OFF or Const.LAMP_ON out[dev_id] = _set(dev_id, state) return out def call_blink(*args, **kwargs): ''' Blink a lamp. If lamp is ON, then blink ON-OFF-ON, otherwise OFF-ON-OFF. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **pause**: Time in seconds. Can be less than 1, i.e. 0.7, 0.5 sec. CLI Example: .. 
code-block:: bash salt '*' hue.blink id=1 salt '*' hue.blink id=1,2,3 ''' devices = _get_lights() pause = kwargs.get('pause', 0) res = dict() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): state = devices[six.text_type(dev_id)]['state']['on'] _set(dev_id, state and Const.LAMP_OFF or Const.LAMP_ON) if pause: time.sleep(pause) res[dev_id] = _set(dev_id, not state and Const.LAMP_OFF or Const.LAMP_ON) return res def call_ping(*args, **kwargs): ''' Ping the lamps by issuing a short inversion blink to all available devices. CLI Example: .. code-block:: bash salt '*' hue.ping ''' errors = dict() for dev_id, dev_status in call_blink().items(): if not dev_status['result']: errors[dev_id] = False return errors or True def call_status(*args, **kwargs): ''' Return the status of the lamps. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. CLI Example: .. code-block:: bash salt '*' hue.status salt '*' hue.status id=1 salt '*' hue.status id=1,2,3 ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): dev_id = six.text_type(dev_id) res[dev_id] = { 'on': devices[dev_id]['state']['on'], 'reachable': devices[dev_id]['state']['reachable'] } return res def call_rename(*args, **kwargs): ''' Rename a device. Options: * **id**: Specifies a device ID. Only one device at a time. * **title**: Title of the device. CLI Example: .. code-block:: bash salt '*' hue.rename id=1 title='WC for cats' ''' dev_id = _get_devices(kwargs) if len(dev_id) > 1: raise CommandExecutionError("Only one device can be renamed at a time") if 'title' not in kwargs: raise CommandExecutionError("Title is missing") return _set(dev_id[0], {"name": kwargs['title']}, method="") def call_alert(*args, **kwargs): ''' Lamp alert Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **on**: Turns on or off an alert. Default is True. 
CLI Example: .. code-block:: bash salt '*' hue.alert salt '*' hue.alert id=1 salt '*' hue.alert id=1,2,3 on=false ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"alert": kwargs.get("on", True) and "lselect" or "none"}) return res def call_effect(*args, **kwargs): ''' Set an effect to the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **type**: Type of the effect. Possible values are "none" or "colorloop". Default "none". CLI Example: .. code-block:: bash salt '*' hue.effect salt '*' hue.effect id=1 salt '*' hue.effect id=1,2,3 type=colorloop ''' res = dict() devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"effect": kwargs.get("type", "none")}) return res def call_color(*args, **kwargs): ''' Set a color to the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **color**: Fixed color. Values are: red, green, blue, orange, pink, white, yellow, daylight, purple. Default white. * **transition**: Transition 0~200. Advanced: * **gamut**: XY coordinates. Use gamut according to the Philips HUE devices documentation. More: http://www.developers.meethue.com/documentation/hue-xy-values CLI Example: .. 
code-block:: bash salt '*' hue.color salt '*' hue.color id=1 salt '*' hue.color id=1,2,3 oolor=red transition=30 salt '*' hue.color id=1 gamut=0.3,0.5 ''' res = dict() colormap = { 'red': Const.COLOR_RED, 'green': Const.COLOR_GREEN, 'blue': Const.COLOR_BLUE, 'orange': Const.COLOR_ORANGE, 'pink': Const.COLOR_PINK, 'white': Const.COLOR_WHITE, 'yellow': Const.COLOR_YELLOW, 'daylight': Const.COLOR_DAYLIGHT, 'purple': Const.COLOR_PURPLE, } devices = _get_lights() color = kwargs.get("gamut") if color: color = color.split(",") if len(color) == 2: try: color = {"xy": [float(color[0]), float(color[1])]} except Exception as ex: color = None else: color = None if not color: color = colormap.get(kwargs.get("color", 'white'), Const.COLOR_WHITE) color.update({"transitiontime": max(min(kwargs.get("transition", 0), 200), 0)}) for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, color) return res def call_brightness(*args, **kwargs): ''' Set an effect to the lamp. Arguments: * **value**: 0~255 brightness of the lamp. Options: * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted. * **transition**: Transition 0~200. Default 0. CLI Example: .. code-block:: bash salt '*' hue.brightness value=100 salt '*' hue.brightness id=1 value=150 salt '*' hue.brightness id=1,2,3 value=255 ''' res = dict() if 'value' not in kwargs: raise CommandExecutionError("Parameter 'value' is missing") try: brightness = max(min(int(kwargs['value']), 244), 1) except Exception as err: raise CommandExecutionError("Parameter 'value' does not contains an integer") try: transition = max(min(int(kwargs['transition']), 200), 0) except Exception as err: transition = 0 devices = _get_lights() for dev_id in 'id' not in kwargs and sorted(devices.keys()) or _get_devices(kwargs): res[dev_id] = _set(dev_id, {"bri": brightness, "transitiontime": transition}) return res
saltstack/salt
salt/fileserver/hgfs.py
_get_branch
python
def _get_branch(repo, name):
    '''
    Find the requested branch in the specified repo.

    Returns the branch tuple whose first element matches *name*,
    or ``False`` if no branch with that name exists.
    '''
    matches = (branch for branch in _all_branches(repo) if branch[0] == name)
    return next(matches, False)
Find the requested branch in the specified repo
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L111-L118
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. 
The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. ''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( 
[(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. ' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. 
You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): 
to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in (fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. 
try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if 
os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. 
''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' ) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => 
st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_get_bookmark
python
def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False
Find the requested bookmark in the specified repo
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L132-L139
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_get_tag
python
def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False
Find the requested tag in the specified repo
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L154-L161
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_get_ref
python
def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. ''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False
Return ref tuple if ref is in the repo.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L164-L181
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. 
return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. ' 'Valid parameters are: %s. 
See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
init
python
def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. ' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. 
Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos
Return a list of hglib objects for the various hgfs remotes
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L193-L341
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the 
file descriptor on systems with fcntl\n # unix and unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def repack_dictlist(data,\n strict=False,\n recurse=False,\n key_cb=None,\n val_cb=None):\n '''\n Takes a list of one-element dicts (as found in many SLS schemas) and\n repacks into a single dictionary.\n '''\n if isinstance(data, six.string_types):\n try:\n data = salt.utils.yaml.safe_load(data)\n except salt.utils.yaml.parser.ParserError as err:\n log.error(err)\n return {}\n\n if key_cb is None:\n key_cb = lambda x: x\n if val_cb is None:\n val_cb = lambda x, y: y\n\n valid_non_dict = (six.string_types, six.integer_types, float)\n if isinstance(data, list):\n for element in data:\n if isinstance(element, valid_non_dict):\n continue\n elif isinstance(element, dict):\n if len(element) != 1:\n log.error(\n 'Invalid input for repack_dictlist: key/value pairs '\n 'must contain only one element (data passed: %s).',\n element\n )\n return {}\n else:\n log.error(\n 'Invalid input for repack_dictlist: element %s is '\n 'not a string/dict/numeric value', element\n )\n return {}\n else:\n log.error(\n 'Invalid input for repack_dictlist, data passed is not a list '\n '(%s)', data\n )\n return {}\n\n ret = {}\n for element in data:\n if isinstance(element, valid_non_dict):\n ret[key_cb(element)] = None\n else:\n key = next(iter(element))\n val = element[key]\n if is_dictlist(val):\n if recurse:\n ret[key_cb(key)] = repack_dictlist(val, recurse=recurse)\n elif strict:\n log.error(\n 'Invalid input for repack_dictlist: nested dictlist '\n 'found, but recurse is set to False'\n )\n return {}\n else:\n ret[key_cb(key)] = val_cb(key, val)\n else:\n ret[key_cb(key)] = val_cb(key, val)\n return ret\n", "def to_str(s, 
encoding=None, errors='strict', normalize=False):\n '''\n Given str, bytes, bytearray, or unicode (py2), return str\n '''\n def _normalize(s):\n try:\n return unicodedata.normalize('NFC', s) if normalize else s\n except TypeError:\n return s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n # This shouldn't be six.string_types because if we're on PY2 and we already\n # have a string, we should just return it.\n if isinstance(s, str):\n return _normalize(s)\n\n exc = None\n if six.PY3:\n if isinstance(s, (bytes, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytes, or bytearray not {}'.format(type(s)))\n else:\n if isinstance(s, bytearray):\n return str(s) # future lint: disable=blacklisted-function\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n for enc in encoding:\n try:\n return _normalize(s).encode(enc, errors)\n except UnicodeEncodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str, bytearray, or unicode')\n", "def strip_proto(url):\n '''\n Return a copy of the string with the protocol designation stripped, if one\n was present.\n '''\n return re.sub('^[^:/]+://', '', url)\n", "def _failhard():\n '''\n Fatal fileserver configuration issue, raise an 
exception\n '''\n raise FileserverConfigError(\n 'Failed to load hg fileserver backend'\n )\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in (fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. 
If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
lock
python
def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors
Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L457-L501
[ "def init():\n '''\n Return a list of hglib objects for the various hgfs remotes\n '''\n bp_ = os.path.join(__opts__['cachedir'], 'hgfs')\n new_remote = False\n repos = []\n\n per_remote_defaults = {}\n for param in PER_REMOTE_OVERRIDES:\n per_remote_defaults[param] = \\\n six.text_type(__opts__['hgfs_{0}'.format(param)])\n\n for remote in __opts__['hgfs_remotes']:\n repo_conf = copy.deepcopy(per_remote_defaults)\n if isinstance(remote, dict):\n repo_url = next(iter(remote))\n per_remote_conf = dict(\n [(key, six.text_type(val)) for key, val in\n six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]\n )\n if not per_remote_conf:\n log.error(\n 'Invalid per-remote configuration for hgfs remote %s. If '\n 'no per-remote parameters are being specified, there may '\n 'be a trailing colon after the URL, which should be '\n 'removed. Check the master configuration file.', repo_url\n )\n _failhard()\n\n branch_method = \\\n per_remote_conf.get('branch_method',\n per_remote_defaults['branch_method'])\n if branch_method not in VALID_BRANCH_METHODS:\n log.error(\n 'Invalid branch_method \\'%s\\' for remote %s. Valid '\n 'branch methods are: %s. This remote will be ignored.',\n branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS)\n )\n _failhard()\n\n per_remote_errors = False\n for param in (x for x in per_remote_conf\n if x not in PER_REMOTE_OVERRIDES):\n log.error(\n 'Invalid configuration parameter \\'%s\\' for remote %s. '\n 'Valid parameters are: %s. See the documentation for '\n 'further information.',\n param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)\n )\n per_remote_errors = True\n if per_remote_errors:\n _failhard()\n\n repo_conf.update(per_remote_conf)\n else:\n repo_url = remote\n\n if not isinstance(repo_url, six.string_types):\n log.error(\n 'Invalid hgfs remote %s. 
Remotes must be strings, you may '\n 'need to enclose the URL in quotes', repo_url\n )\n _failhard()\n\n try:\n repo_conf['mountpoint'] = salt.utils.url.strip_proto(\n repo_conf['mountpoint']\n )\n except TypeError:\n # mountpoint not specified\n pass\n\n hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))\n repo_hash = hash_type(repo_url).hexdigest()\n rp_ = os.path.join(bp_, repo_hash)\n if not os.path.isdir(rp_):\n os.makedirs(rp_)\n\n if not os.listdir(rp_):\n # Only init if the directory is empty.\n hglib.init(rp_)\n new_remote = True\n try:\n repo = hglib.open(rp_)\n except hglib.error.ServerError:\n log.error(\n 'Cache path %s (corresponding remote: %s) exists but is not '\n 'a valid mercurial repository. You will need to manually '\n 'delete this directory on the master to continue to use this '\n 'hgfs remote.', rp_, repo_url\n )\n _failhard()\n except Exception as exc:\n log.error(\n 'Exception \\'%s\\' encountered while initializing hgfs '\n 'remote %s', exc, repo_url\n )\n _failhard()\n\n try:\n refs = repo.config(names='paths')\n except hglib.error.CommandError:\n refs = None\n\n # Do NOT put this if statement inside the except block above. 
Earlier\n # versions of hglib did not raise an exception, so we need to do it\n # this way to support both older and newer hglib.\n if not refs:\n # Write an hgrc defining the remote URL\n hgconfpath = os.path.join(rp_, '.hg', 'hgrc')\n with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig:\n hgconfig.write('[paths]\\n')\n hgconfig.write(\n salt.utils.stringutils.to_str(\n 'default = {0}\\n'.format(repo_url)\n )\n )\n\n repo_conf.update({\n 'repo': repo,\n 'url': repo_url,\n 'hash': repo_hash,\n 'cachedir': rp_,\n 'lockfile': os.path.join(__opts__['cachedir'],\n 'hgfs',\n '{0}.update.lk'.format(repo_hash))\n })\n repos.append(repo_conf)\n repo.close()\n\n if new_remote:\n remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')\n try:\n with salt.utils.files.fopen(remote_map, 'w+') as fp_:\n timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')\n fp_.write('# hgfs_remote map as of {0}\\n'.format(timestamp))\n for repo in repos:\n fp_.write(\n salt.utils.stringutils.to_str(\n '{0} = {1}\\n'.format(repo['hash'], repo['url'])\n )\n )\n except OSError:\n pass\n else:\n log.info('Wrote new hgfs_remote map to %s', remote_map)\n\n return repos\n", "def _do_lock(repo):\n success = []\n failed = []\n if not os.path.exists(repo['lockfile']):\n try:\n with salt.utils.files.fopen(repo['lockfile'], 'w'):\n pass\n except (IOError, OSError) as exc:\n msg = ('Unable to set update lock for {0} ({1}): {2} '\n .format(repo['url'], repo['lockfile'], exc))\n log.debug(msg)\n failed.append(msg)\n else:\n msg = 'Set lock for {0}'.format(repo['url'])\n log.debug(msg)\n success.append(msg)\n return success, failed\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
        # No [paths] section yet: fresh clone dir, so point 'default' at
        # the remote URL via an hgrc file
        if not refs:
            # Write an hgrc defining the remote URL
            hgconfpath = os.path.join(rp_, '.hg', 'hgrc')
            with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig:
                hgconfig.write('[paths]\n')
                hgconfig.write(
                    salt.utils.stringutils.to_str(
                        'default = {0}\n'.format(repo_url)
                    )
                )

        repo_conf.update({
            'repo': repo,
            'url': repo_url,
            'hash': repo_hash,
            'cachedir': rp_,
            'lockfile': os.path.join(__opts__['cachedir'],
                                     'hgfs',
                                     '{0}.update.lk'.format(repo_hash))
        })
        repos.append(repo_conf)
        repo.close()

    if new_remote:
        # Persist a hash -> URL map so admins can identify the cache dirs
        remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')
        try:
            with salt.utils.files.fopen(remote_map, 'w+') as fp_:
                timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
                fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp))
                for repo in repos:
                    fp_.write(
                        salt.utils.stringutils.to_str(
                            '{0} = {1}\n'.format(repo['hash'], repo['url'])
                        )
                    )
        except OSError:
            # Best-effort map file; failure to write it is not fatal
            pass
        else:
            log.info('Wrote new hgfs_remote map to %s', remote_map)

    return repos


def _clear_old_remotes():
    '''
    Remove cache directories for remotes no longer configured

    Returns a 2-tuple: (True if anything was removed, list of repo configs
    from init()).
    '''
    bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
    try:
        cachedir_ls = os.listdir(bp_)
    except OSError:
        cachedir_ls = []
    repos = init()
    # Remove actively-used remotes from list
    for repo in repos:
        try:
            cachedir_ls.remove(repo['hash'])
        except ValueError:
            pass
    to_remove = []
    for item in cachedir_ls:
        # 'hash' and 'refs' are shared subdirs, not per-remote caches
        if item in ('hash', 'refs'):
            continue
        path = os.path.join(bp_, item)
        if os.path.isdir(path):
            to_remove.append(path)
    failed = []
    if to_remove:
        for rdir in to_remove:
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                log.error(
                    'Unable to remove old hgfs remote cachedir %s: %s',
                    rdir, exc
                )
                failed.append(rdir)
            else:
                log.debug('hgfs removed old cachedir %s', rdir)
    # Drop the ones that could not be deleted so the boolean below only
    # reflects dirs actually removed
    for fdir in failed:
        to_remove.remove(fdir)
    return bool(to_remove), repos


def clear_cache():
    '''
    Completely clear hgfs cache

    Returns a list of error strings (empty on full success).
    '''
    fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs')
    list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
    errors = []
    for rdir in (fsb_cachedir, list_cachedir):
        if os.path.exists(rdir):
            try:
                shutil.rmtree(rdir)
            except OSError as exc:
                errors.append('Unable to delete {0}: {1}'.format(rdir, exc))
    return errors


def clear_lock(remote=None):
    '''
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will have their update lock cleared.

    Returns a 2-tuple of (success messages, error messages).
    '''
    def _do_clear_lock(repo):
        # Remove a single repo's lockfile; collects outcome messages
        def _add_error(errlist, repo, exc):
            msg = ('Unable to remove update lock for {0} ({1}): {2} '
                   .format(repo['url'], repo['lockfile'], exc))
            log.debug(msg)
            errlist.append(msg)
        success = []
        failed = []
        if os.path.exists(repo['lockfile']):
            try:
                os.remove(repo['lockfile'])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo['lockfile'])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = 'Removed lock for {0}'.format(repo['url'])
                log.debug(msg)
                success.append(msg)
        return success, failed

    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            try:
                if not fnmatch.fnmatch(repo['url'], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors


def update():
    '''
    Execute an hg pull on all of the repos
    '''
    # data for the fileserver event
    data = {'changed': False,
            'backend': 'hgfs'}
    # _clear_old_remotes runs init(), so use the value from there to avoid a
    # second init()
    data['changed'], repos = _clear_old_remotes()
    for repo in repos:
        if os.path.exists(repo['lockfile']):
            log.warning(
                'Update lockfile is present for hgfs remote %s, skipping. '
                'If this warning persists, it is possible that the update '
                'process was interrupted. Removing %s or running '
                '\'salt-run fileserver.clear_lock hgfs\' will allow updates '
                'to continue for this remote.', repo['url'], repo['lockfile']
            )
            continue
        _, errors = lock(repo)
        if errors:
            log.error(
                'Unable to set update lock for hgfs remote %s, skipping.',
                repo['url']
            )
            continue
        log.debug('hgfs is fetching from %s', repo['url'])
        repo['repo'].open()
        curtip = repo['repo'].tip()
        try:
            repo['repo'].pull()
        except Exception as exc:
            log.error(
                'Exception %s caught while updating hgfs remote %s',
                exc, repo['url'], exc_info_on_loglevel=logging.DEBUG
            )
        else:
            newtip = repo['repo'].tip()
            # tip() yields a revision tuple; a change in element 1 after the
            # pull indicates new history was fetched
            if curtip[1] != newtip[1]:
                data['changed'] = True
        repo['repo'].close()
        clear_lock(repo)

    # Refresh the serialized environment cache when anything changed (or if
    # the cache file does not exist yet)
    env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
    if data.get('changed', False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        serial = salt.payload.Serial(__opts__)
        with salt.utils.files.fopen(env_cache, 'wb+') as fp_:
            fp_.write(serial.dumps(new_envs))
            log.trace('Wrote env cache data to %s', env_cache)

    # if there is a change, fire an event
    if __opts__.get('fileserver_events', False):
        event = salt.utils.event.get_event(
                'master',
                __opts__['sock_dir'],
                __opts__['transport'],
                opts=__opts__,
                listen=False)
        event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver'))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__['cachedir'], 'hgfs/hash'),
            find_file
        )
    except (IOError, OSError):
        # Hash file won't exist if no files have yet been served up
        pass


def _env_is_exposed(env):
    '''
    Check if an environment is exposed by comparing it against a whitelist
    and blacklist.
    '''
    # The hgfs_env_* options are deprecated aliases of hgfs_saltenv_*; the
    # deprecated spelling wins if set, but emits a deprecation warning.
    if __opts__['hgfs_env_whitelist']:
        salt.utils.versions.warn_until(
            'Neon',
            'The hgfs_env_whitelist config option has been renamed to '
            'hgfs_saltenv_whitelist. Please update your configuration.'
        )
        whitelist = __opts__['hgfs_env_whitelist']
    else:
        whitelist = __opts__['hgfs_saltenv_whitelist']

    if __opts__['hgfs_env_blacklist']:
        salt.utils.versions.warn_until(
            'Neon',
            'The hgfs_env_blacklist config option has been renamed to '
            'hgfs_saltenv_blacklist. Please update your configuration.'
        )
        blacklist = __opts__['hgfs_env_blacklist']
    else:
        blacklist = __opts__['hgfs_saltenv_blacklist']

    return salt.utils.stringutils.check_whitelist_blacklist(
        env,
        whitelist=whitelist,
        blacklist=blacklist,
    )


def envs(ignore_cache=False):
    '''
    Return a list of refs that can be used as environments

    Branches, bookmarks and/or tags are collected per the remote's
    branch_method; the configured base ref is reported as 'base'.
    '''
    if not ignore_cache:
        env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')
        cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
        if cache_match is not None:
            return cache_match
    ret = set()
    for repo in init():
        repo['repo'].open()
        if repo['branch_method'] in ('branches', 'mixed'):
            for branch in _all_branches(repo['repo']):
                branch_name = branch[0]
                if branch_name == repo['base']:
                    branch_name = 'base'
                ret.add(branch_name)
        if repo['branch_method'] in ('bookmarks', 'mixed'):
            for bookmark in _all_bookmarks(repo['repo']):
                bookmark_name = bookmark[0]
                if bookmark_name == repo['base']:
                    bookmark_name = 'base'
                ret.add(bookmark_name)
        # Tags are always exposed, regardless of branch_method
        ret.update([x[0] for x in _all_tags(repo['repo'])])
        repo['repo'].close()
    return [x for x in sorted(ret) if _env_is_exposed(x)]


def find_file(path, tgt_env='base', **kwargs):  # pylint: disable=W0613
    '''
    Find the first file to match the path and ref, read the file out of hg
    and send the path to the newly cached file
    '''
    fnd = {'path': '',
           'rel': ''}
    if os.path.isabs(path) or tgt_env not in envs():
        return fnd

    dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path)
    hashes_glob = os.path.join(__opts__['cachedir'],
                               'hgfs/hash',
                               tgt_env,
                               '{0}.hash.*'.format(path))
    blobshadest = os.path.join(__opts__['cachedir'],
                               'hgfs/hash',
                               tgt_env,
                               '{0}.hash.blob_sha1'.format(path))
    lk_fn = os.path.join(__opts__['cachedir'],
                         'hgfs/hash',
                         tgt_env,
                         '{0}.lk'.format(path))
    destdir = os.path.dirname(dest)
    hashdir = os.path.dirname(blobshadest)
    if not os.path.isdir(destdir):
        try:
            os.makedirs(destdir)
        except OSError:
            # Path exists and is a file, remove it and retry
            os.remove(destdir)
            os.makedirs(destdir)
    if not os.path.isdir(hashdir):
        try:
            os.makedirs(hashdir)
        except OSError:
            # Path exists and is a file, remove it and retry
            os.remove(hashdir)
            os.makedirs(hashdir)

    for repo in init():
        # Skip remotes whose mountpoint does not prefix the requested path
        if repo['mountpoint'] \
                and not path.startswith(repo['mountpoint'] + os.path.sep):
            continue

        repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep)
        if repo['root']:
            repo_path = os.path.join(repo['root'], repo_path)

        repo['repo'].open()
        ref = _get_ref(repo, tgt_env)
        if not ref:
            # Branch or tag not found in repo, try the next
            repo['repo'].close()
            continue
        salt.fileserver.wait_lock(lk_fn, dest)
        # Cache hit: the stored node id matches the resolved ref
        if os.path.isfile(blobshadest) and os.path.isfile(dest):
            with salt.utils.files.fopen(blobshadest, 'r') as fp_:
                sha = fp_.read()
                if sha == ref[2]:
                    fnd['rel'] = path
                    fnd['path'] = dest
                    repo['repo'].close()
                    return fnd
        try:
            repo['repo'].cat(
                ['path:{0}'.format(repo_path)], rev=ref[2], output=dest
            )
        except hglib.error.CommandError:
            # File does not exist at this ref in this remote; try the next
            repo['repo'].close()
            continue
        # Hold the lock while invalidating stale hash files
        with salt.utils.files.fopen(lk_fn, 'w'):
            pass
        for filename in glob.glob(hashes_glob):
            try:
                os.remove(filename)
            except Exception:
                pass
        with salt.utils.files.fopen(blobshadest, 'w+') as fp_:
            fp_.write(ref[2])
        try:
            os.remove(lk_fn)
        except (OSError, IOError):
            pass
        fnd['rel'] = path
        fnd['path'] = dest
        try:
            # Converting the stat result to a list, the elements of the
            # list correspond to the following stat_result params:
            # 0 => st_mode=33188
            # 1 => st_ino=10227377
            # 2 => st_dev=65026
            # 3 => st_nlink=1
            # 4 => st_uid=1000
            # 5 => st_gid=1000
            # 6 => st_size=1056233
            # 7 => st_atime=1468284229
            # 8 => st_mtime=1456338235
            # 9 => st_ctime=1456338235
            fnd['stat'] = list(os.stat(dest))
        except Exception:
            pass
        repo['repo'].close()
        return fnd
    return fnd


def serve_file(load, fnd):
    '''
    Return a chunk from a file based on the data received

    ``load`` must contain 'path', 'loc' (byte offset) and 'saltenv';
    ``fnd`` is the dict produced by find_file().
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    ret = {'data': '',
           'dest': ''}
    if not all(x in load for x in ('path', 'loc', 'saltenv')):
        return ret
    if not fnd['path']:
        return ret
    ret['dest'] = fnd['rel']
    gzip = load.get('gzip', None)
    fpath = os.path.normpath(fnd['path'])
    with salt.utils.files.fopen(fpath, 'rb') as fp_:
        fp_.seek(load['loc'])
        data = fp_.read(__opts__['file_buffer_size'])
        if data and six.PY3 and not salt.utils.files.is_binary(fpath):
            data = data.decode(__salt_system_encoding__)
        if gzip and data:
            data = salt.utils.gzip_util.compress(data, gzip)
            ret['gzip'] = gzip
        ret['data'] = data
    return ret


def file_hash(load, fnd):
    '''
    Return a file hash, the hash type is set in the master config file
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if not all(x in load for x in ('path', 'saltenv')):
        return ''
    ret = {'hash_type': __opts__['hash_type']}
    relpath = fnd['rel']
    path = fnd['path']
    hashdest = os.path.join(__opts__['cachedir'],
                            'hgfs/hash',
                            load['saltenv'],
                            '{0}.hash.{1}'.format(relpath,
                                                  __opts__['hash_type']))
    if not os.path.isfile(hashdest):
        # No cached hash yet: compute it and cache for subsequent requests
        ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type'])
        with salt.utils.files.fopen(hashdest, 'w+') as fp_:
            fp_.write(ret['hsum'])
        return ret
    else:
        with salt.utils.files.fopen(hashdest, 'rb') as fp_:
            ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read())
        return ret


def _file_lists(load, form):
    '''
    Return a dict containing the file lists for files and dirs
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs')
    if not os.path.isdir(list_cachedir):
        try:
            os.makedirs(list_cachedir)
        except os.error:
            # os.error is a legacy alias of OSError
            log.critical('Unable to make cachedir %s', list_cachedir)
            return []
    list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv']))
    w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv']))
    cache_match, refresh_cache, save_cache = \
        salt.fileserver.check_file_list_cache(
            __opts__, form, list_cache, w_lock
        )
    if cache_match is not None:
        return cache_match
    if refresh_cache:
        ret = {}
        ret['files'] = _get_file_list(load)
        ret['dirs'] = _get_dir_list(load)
        if save_cache:
            salt.fileserver.write_file_list_cache(
                __opts__, ret, list_cache, w_lock
            )
        return ret.get(form, [])
    # Shouldn't get here, but if we do, this prevents a TypeError
    return []


def file_list(load):
    '''
    Return a list of all files on the file server in a specified environment
    '''
    return _file_lists(load, 'files')


def _get_file_list(load):
    '''
    Get a list of all files on the file server in a specified environment
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if 'saltenv' not in load or load['saltenv'] not in envs():
        return []
    ret = set()
    for repo in init():
        repo['repo'].open()
        ref = _get_ref(repo, load['saltenv'])
        if ref:
            manifest = repo['repo'].manifest(rev=ref[1])
            for tup in manifest:
                # tup[4] is the file path within the repository
                relpath = os.path.relpath(tup[4], repo['root'])
                # Don't add files outside the hgfs_root
                if not relpath.startswith('../'):
                    ret.add(os.path.join(repo['mountpoint'], relpath))
        repo['repo'].close()
    return sorted(ret)


def file_list_emptydirs(load):  # pylint: disable=W0613
    '''
    Return a list of all empty directories on the master
    '''
    # Cannot have empty dirs in hg
    return []


def dir_list(load):
    '''
    Return a list of all directories on the master
    '''
    return _file_lists(load, 'dirs')


def _get_dir_list(load):
    '''
    Get a list of all directories on the master
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    if 'saltenv' not in load or load['saltenv'] not in envs():
        return []
    ret = set()
    for repo in init():
        repo['repo'].open()
        ref = _get_ref(repo, load['saltenv'])
        if ref:
            manifest = repo['repo'].manifest(rev=ref[1])
            for tup in manifest:
                filepath = tup[4]
                # Walk every parent directory of the file path
                split = filepath.rsplit('/', 1)
                while len(split) > 1:
                    relpath = os.path.relpath(split[0], repo['root'])
                    # Don't add '.'
                    if relpath != '.':
                        # Don't add files outside the hgfs_root
                        if not relpath.startswith('../'):
                            ret.add(os.path.join(repo['mountpoint'], relpath))
                    split = split[0].rsplit('/', 1)
        repo['repo'].close()
        if repo['mountpoint']:
            ret.add(repo['mountpoint'])
    return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
update
python
def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file 
) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass
Execute an hg pull on all of the repos
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L504-L575
[ "def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n", "def lock(remote=None):\n '''\n Place an update.lk\n\n ``remote`` can either be a dictionary containing repo configuration\n information, or a pattern. If the latter, then remotes for which the URL\n matches the pattern will be locked.\n '''\n def _do_lock(repo):\n success = []\n failed = []\n if not os.path.exists(repo['lockfile']):\n try:\n with salt.utils.files.fopen(repo['lockfile'], 'w'):\n pass\n except (IOError, OSError) as exc:\n msg = ('Unable to set update lock for {0} ({1}): {2} '\n .format(repo['url'], repo['lockfile'], exc))\n log.debug(msg)\n failed.append(msg)\n else:\n msg = 'Set lock for {0}'.format(repo['url'])\n log.debug(msg)\n success.append(msg)\n return success, failed\n\n if isinstance(remote, dict):\n return _do_lock(remote)\n\n locked = []\n errors = []\n for repo in init():\n if remote:\n try:\n if not fnmatch.fnmatch(repo['url'], remote):\n continue\n except TypeError:\n # remote was non-string, try again\n if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):\n continue\n success, failed = _do_lock(repo)\n locked.extend(success)\n errors.extend(failed)\n\n return locked, errors\n", "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the 
fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except 
AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def envs(ignore_cache=False):\n '''\n Return a list of refs that can be used as environments\n '''\n if not ignore_cache:\n env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')\n cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)\n if cache_match is not None:\n return cache_match\n ret = set()\n for repo in init():\n repo['repo'].open()\n if repo['branch_method'] in ('branches', 'mixed'):\n for branch in _all_branches(repo['repo']):\n branch_name = branch[0]\n if branch_name == repo['base']:\n branch_name = 'base'\n ret.add(branch_name)\n if repo['branch_method'] in ('bookmarks', 'mixed'):\n for bookmark in _all_bookmarks(repo['repo']):\n bookmark_name = bookmark[0]\n if bookmark_name == repo['base']:\n bookmark_name = 'base'\n ret.add(bookmark_name)\n ret.update([x[0] for x in _all_tags(repo['repo'])])\n repo['repo'].close()\n return [x for x in sorted(ret) if _env_is_exposed(x)]\n", "def tagify(suffix='', prefix='', base=SALT):\n '''\n convenience function to build a namespaced event tag string\n from joining with the TABPART character the base, prefix and suffix\n\n If string prefix is a valid key in TAGS Then use the value of key prefix\n Else use prefix string\n\n If suffix is a list Then join all string elements of suffix individually\n Else use string suffix\n\n '''\n parts = [base, TAGS.get(prefix, prefix)]\n if hasattr(suffix, 'append'): # list so extend parts\n parts.extend(suffix)\n else: # string so append\n parts.append(suffix)\n\n for index, _ in enumerate(parts):\n try:\n parts[index] = salt.utils.stringutils.to_str(parts[index])\n except TypeError:\n parts[index] = str(parts[index])\n return TAGPARTER.join([part for part in parts if part])\n", "def reap_fileserver_cache_dir(cache_base, find_func):\n '''\n Remove 
unused cache items assuming the cache directory follows a directory\n convention:\n\n cache_base -> saltenv -> relpath\n '''\n for saltenv in os.listdir(cache_base):\n env_base = os.path.join(cache_base, saltenv)\n for root, dirs, files in salt.utils.path.os_walk(env_base):\n # if we have an empty directory, lets cleanup\n # This will only remove the directory on the second time\n # \"_reap_cache\" is called (which is intentional)\n if not dirs and not files:\n # only remove if empty directory is older than 60s\n if time.time() - os.path.getctime(root) > 60:\n os.rmdir(root)\n continue\n # if not, lets check the files in the directory\n for file_ in files:\n file_path = os.path.join(root, file_)\n file_rel_path = os.path.relpath(file_path, env_base)\n try:\n filename, _, hash_type = file_rel_path.rsplit('.', 2)\n except ValueError:\n log.warning(\n 'Found invalid hash file [%s] when attempting to reap '\n 'cache directory', file_\n )\n continue\n # do we have the file?\n ret = find_func(filename, saltenv=saltenv)\n # if we don't actually have the file, lets clean up the cache\n # object\n if ret['path'] == '':\n os.unlink(file_path)\n", "def clear_lock(remote=None):\n '''\n Clear update.lk\n\n ``remote`` can either be a dictionary containing repo configuration\n information, or a pattern. If the latter, then remotes for which the URL\n matches the pattern will be locked.\n '''\n def _do_clear_lock(repo):\n def _add_error(errlist, repo, exc):\n msg = ('Unable to remove update lock for {0} ({1}): {2} '\n .format(repo['url'], repo['lockfile'], exc))\n log.debug(msg)\n errlist.append(msg)\n success = []\n failed = []\n if os.path.exists(repo['lockfile']):\n try:\n os.remove(repo['lockfile'])\n except OSError as exc:\n if exc.errno == errno.EISDIR:\n # Somehow this path is a directory. 
Should never happen\n # unless some wiseguy manually creates a directory at this\n # path, but just in case, handle it.\n try:\n shutil.rmtree(repo['lockfile'])\n except OSError as exc:\n _add_error(failed, repo, exc)\n else:\n _add_error(failed, repo, exc)\n else:\n msg = 'Removed lock for {0}'.format(repo['url'])\n log.debug(msg)\n success.append(msg)\n return success, failed\n\n if isinstance(remote, dict):\n return _do_clear_lock(remote)\n\n cleared = []\n errors = []\n for repo in init():\n if remote:\n try:\n if not fnmatch.fnmatch(repo['url'], remote):\n continue\n except TypeError:\n # remote was non-string, try again\n if not fnmatch.fnmatch(repo['url'], six.text_type(remote)):\n continue\n success, failed = _do_clear_lock(repo)\n cleared.extend(success)\n errors.extend(failed)\n return cleared, errors\n", "def _clear_old_remotes():\n '''\n Remove cache directories for remotes no longer configured\n '''\n bp_ = os.path.join(__opts__['cachedir'], 'hgfs')\n try:\n cachedir_ls = os.listdir(bp_)\n except OSError:\n cachedir_ls = []\n repos = init()\n # Remove actively-used remotes from list\n for repo in repos:\n try:\n cachedir_ls.remove(repo['hash'])\n except ValueError:\n pass\n to_remove = []\n for item in cachedir_ls:\n if item in ('hash', 'refs'):\n continue\n path = os.path.join(bp_, item)\n if os.path.isdir(path):\n to_remove.append(path)\n failed = []\n if to_remove:\n for rdir in to_remove:\n try:\n shutil.rmtree(rdir)\n except OSError as exc:\n log.error(\n 'Unable to remove old hgfs remote cachedir %s: %s',\n rdir, exc\n )\n failed.append(rdir)\n else:\n log.debug('hgfs removed old cachedir %s', rdir)\n for fdir in failed:\n to_remove.remove(fdir)\n return bool(to_remove), repos\n", "def dumps(self, msg, use_bin_type=False):\n '''\n Run the correct dumps serialization format\n\n :param use_bin_type: Useful for Python 3 support. 
Tells msgpack to\n differentiate between 'str' and 'bytes' types\n by encoding them differently.\n Since this changes the wire protocol, this\n option should not be used outside of IPC.\n '''\n def ext_type_encoder(obj):\n if isinstance(obj, six.integer_types):\n # msgpack can't handle the very long Python longs for jids\n # Convert any very long longs to strings\n return six.text_type(obj)\n elif isinstance(obj, (datetime.datetime, datetime.date)):\n # msgpack doesn't support datetime.datetime and datetime.date datatypes.\n # So here we have converted these types to custom datatype\n # This is msgpack Extended types numbered 78\n return msgpack.ExtType(78, salt.utils.stringutils.to_bytes(\n obj.strftime('%Y%m%dT%H:%M:%S.%f')))\n # The same for immutable types\n elif isinstance(obj, immutabletypes.ImmutableDict):\n return dict(obj)\n elif isinstance(obj, immutabletypes.ImmutableList):\n return list(obj)\n elif isinstance(obj, (set, immutabletypes.ImmutableSet)):\n # msgpack can't handle set so translate it to tuple\n return tuple(obj)\n elif isinstance(obj, CaseInsensitiveDict):\n return dict(obj)\n # Nothing known exceptions found. 
Let msgpack raise it's own.\n return obj\n\n try:\n if msgpack.version >= (0, 4, 0):\n # msgpack only supports 'use_bin_type' starting in 0.4.0.\n # Due to this, if we don't need it, don't pass it at all so\n # that under Python 2 we can still work with older versions\n # of msgpack.\n return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,\n use_bin_type=use_bin_type,\n _msgpack_module=msgpack)\n else:\n return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,\n _msgpack_module=msgpack)\n except (OverflowError, msgpack.exceptions.PackValueError):\n # msgpack<=0.4.6 don't call ext encoder on very long integers raising the error instead.\n # Convert any very long longs to strings and call dumps again.\n def verylong_encoder(obj, context):\n # Make sure we catch recursion here.\n objid = id(obj)\n if objid in context:\n return '<Recursion on {} with id={}>'.format(type(obj).__name__, id(obj))\n context.add(objid)\n\n if isinstance(obj, dict):\n for key, value in six.iteritems(obj.copy()):\n obj[key] = verylong_encoder(value, context)\n return dict(obj)\n elif isinstance(obj, (list, tuple)):\n obj = list(obj)\n for idx, entry in enumerate(obj):\n obj[idx] = verylong_encoder(entry, context)\n return obj\n # A value of an Integer object is limited from -(2^63) upto (2^64)-1 by MessagePack\n # spec. Here we care only of JIDs that are positive integers.\n if isinstance(obj, six.integer_types) and obj >= pow(2, 64):\n return six.text_type(obj)\n else:\n return obj\n\n msg = verylong_encoder(msg, set())\n if msgpack.version >= (0, 4, 0):\n return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,\n use_bin_type=use_bin_type,\n _msgpack_module=msgpack)\n else:\n return salt.utils.msgpack.dumps(msg, default=ext_type_encoder,\n _msgpack_module=msgpack)\n" ]
# -*- coding: utf-8 -*-
'''
Mercurial Fileserver Backend

To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the
Master config file.

.. code-block:: yaml

    fileserver_backend:
      - hgfs

.. note::
    ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would
    work.

After enabling this backend, branches, bookmarks, and tags in a remote
mercurial repository are exposed to salt as different environments. This
feature is managed by the :conf_master:`fileserver_backend` option in the salt
master config file.

This fileserver has an additional option :conf_master:`hgfs_branch_method`
that will set the desired branch method. Possible values are: ``branches``,
``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the
``default`` branch will be mapped to ``base``.

.. versionchanged:: 2014.1.0
    The :conf_master:`hgfs_base` master config parameter was added, allowing
    for a branch other than ``default`` to be used for the ``base``
    environment, and allowing for a ``base`` environment to be specified when
    using an :conf_master:`hgfs_branch_method` of ``bookmarks``.

:depends:   - mercurial
            - python bindings for mercurial (``python-hglib``)
'''

# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import errno
import fnmatch
import glob
import hashlib
import logging
import os
import shutil
from datetime import datetime

from salt.exceptions import FileserverConfigError

# Valid values for the (per-remote) branch_method option
VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed')
# Config options which may be overridden per-remote
PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root')

# Import third party libs
from salt.ext import six

# pylint: disable=import-error
try:
    import hglib
    HAS_HG = True
except ImportError:
    HAS_HG = False
# pylint: enable=import-error

# Import salt libs
import salt.utils.data
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
import salt.fileserver
from salt.utils.event import tagify

log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = 'hg'


def __virtual__():
    '''
    Only load if mercurial is available
    '''
    if __virtualname__ not in __opts__['fileserver_backend']:
        return False
    if not HAS_HG:
        log.error('Mercurial fileserver backend is enabled in configuration '
                  'but could not be loaded, is hglib installed?')
        return False
    if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS:
        log.error(
            'Invalid hgfs_branch_method \'%s\'. Valid methods are: %s',
            __opts__['hgfs_branch_method'],
            VALID_BRANCH_METHODS
        )
        return False
    return __virtualname__


def _all_branches(repo):
    '''
    Returns all branches for the specified repo
    '''
    # repo.branches() returns a list of 3-tuples consisting of
    # (branch name, rev #, nodeid)
    # Example: [('default', 4, '7c96229269fa')]
    return repo.branches()


def _get_branch(repo, name):
    '''
    Find the requested branch in the specified repo

    Returns the matching (name, rev, nodeid) tuple, or False if not found.
    '''
    try:
        return [x for x in _all_branches(repo) if x[0] == name][0]
    except IndexError:
        return False


def _all_bookmarks(repo):
    '''
    Returns all bookmarks for the specified repo
    '''
    # repo.bookmarks() returns a tuple containing the following:
    # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid)
    # 2. The index of the current bookmark (-1 if no current one)
    # Example: ([('mymark', 4, '7c96229269fa')], -1)
    return repo.bookmarks()[0]


def _get_bookmark(repo, name):
    '''
    Find the requested bookmark in the specified repo

    Returns the matching (name, rev, nodeid) tuple, or False if not found.
    '''
    try:
        return [x for x in _all_bookmarks(repo) if x[0] == name][0]
    except IndexError:
        return False


def _all_tags(repo):
    '''
    Returns all tags for the specified repo
    '''
    # repo.tags() returns a list of 4-tuples consisting of
    # (tag name, rev #, nodeid, islocal)
    # Example: [('1.0', 3, '3be15e71b31a', False),
    #           ('tip', 4, '7c96229269fa', False)]
    # Avoid returning the special 'tip' tag.
    return [x for x in repo.tags() if x[0] != 'tip']


def _get_tag(repo, name):
    '''
    Find the requested tag in the specified repo

    Returns the matching tag tuple, or False if not found.
    '''
    try:
        return [x for x in _all_tags(repo) if x[0] == name][0]
    except IndexError:
        return False


def _get_ref(repo, name):
    '''
    Return ref tuple if ref is in the repo.
    '''
    if name == 'base':
        # 'base' is an alias for this remote's configured base ref
        name = repo['base']
    if name == repo['base'] or name in envs():
        if repo['branch_method'] == 'branches':
            return _get_branch(repo['repo'], name) \
                or _get_tag(repo['repo'], name)
        elif repo['branch_method'] == 'bookmarks':
            return _get_bookmark(repo['repo'], name) \
                or _get_tag(repo['repo'], name)
        elif repo['branch_method'] == 'mixed':
            return _get_branch(repo['repo'], name) \
                or _get_bookmark(repo['repo'], name) \
                or _get_tag(repo['repo'], name)
    # Not a valid ref for this remote's branch_method
    return False


def _failhard():
    '''
    Fatal fileserver configuration issue, raise an exception
    '''
    raise FileserverConfigError(
        'Failed to load hg fileserver backend'
    )


def init():
    '''
    Return a list of hglib objects for the various hgfs remotes
    '''
    # Top-level cache directory shared by all hgfs remotes
    bp_ = os.path.join(__opts__['cachedir'], 'hgfs')
    new_remote = False
    repos = []

    # Global hgfs_* options serve as the defaults for every remote
    per_remote_defaults = {}
    for param in PER_REMOTE_OVERRIDES:
        per_remote_defaults[param] = \
            six.text_type(__opts__['hgfs_{0}'.format(param)])

    for remote in __opts__['hgfs_remotes']:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            # Dict form: the single key is the URL, the value holds
            # per-remote parameter overrides
            repo_url = next(iter(remote))
            per_remote_conf = dict(
                [(key, six.text_type(val)) for key, val in
                 six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]
            )
            if not per_remote_conf:
                log.error(
                    'Invalid per-remote configuration for hgfs remote %s. If '
                    'no per-remote parameters are being specified, there may '
                    'be a trailing colon after the URL, which should be '
                    'removed. Check the master configuration file.', repo_url
                )
                _failhard()

            branch_method = \
                per_remote_conf.get('branch_method',
                                    per_remote_defaults['branch_method'])
            if branch_method not in VALID_BRANCH_METHODS:
                log.error(
                    'Invalid branch_method \'%s\' for remote %s. Valid '
                    'branch methods are: %s. This remote will be ignored.',
                    branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS)
                )
                _failhard()

            # Reject any override names outside PER_REMOTE_OVERRIDES
            per_remote_errors = False
            for param in (x for x in per_remote_conf
                          if x not in PER_REMOTE_OVERRIDES):
                log.error(
                    'Invalid configuration parameter \'%s\' for remote %s. '
                    'Valid parameters are: %s. See the documentation for '
                    'further information.',
                    param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, six.string_types):
            log.error(
                'Invalid hgfs remote %s. Remotes must be strings, you may '
                'need to enclose the URL in quotes', repo_url
            )
            _failhard()

        try:
            repo_conf['mountpoint'] = salt.utils.url.strip_proto(
                repo_conf['mountpoint']
            )
        except TypeError:
            # mountpoint not specified
            pass

        # The cache dir name is a hash of the remote URL
        hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))
        repo_hash = hash_type(repo_url).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only init if the directory is empty.
            hglib.init(rp_)
            new_remote = True
        try:
            repo = hglib.open(rp_)
        except hglib.error.ServerError:
            log.error(
                'Cache path %s (corresponding remote: %s) exists but is not '
                'a valid mercurial repository. You will need to manually '
                'delete this directory on the master to continue to use this '
                'hgfs remote.', rp_, repo_url
            )
            _failhard()
        except Exception as exc:
            log.error(
                'Exception \'%s\' encountered while initializing hgfs '
                'remote %s', exc, repo_url
            )
            _failhard()

        try:
            refs = repo.config(names='paths')
        except hglib.error.CommandError:
            refs = None

        # Do NOT put this if statement inside the except block above. Earlier
        # versions of hglib did not raise an exception, so we need to do it
        # this way to support both older and newer hglib.
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' ) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' 
) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: 
os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
envs
python
def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)]
Return a list of refs that can be used as environments
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L610-L636
[ "def init():\n '''\n Return a list of hglib objects for the various hgfs remotes\n '''\n bp_ = os.path.join(__opts__['cachedir'], 'hgfs')\n new_remote = False\n repos = []\n\n per_remote_defaults = {}\n for param in PER_REMOTE_OVERRIDES:\n per_remote_defaults[param] = \\\n six.text_type(__opts__['hgfs_{0}'.format(param)])\n\n for remote in __opts__['hgfs_remotes']:\n repo_conf = copy.deepcopy(per_remote_defaults)\n if isinstance(remote, dict):\n repo_url = next(iter(remote))\n per_remote_conf = dict(\n [(key, six.text_type(val)) for key, val in\n six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]\n )\n if not per_remote_conf:\n log.error(\n 'Invalid per-remote configuration for hgfs remote %s. If '\n 'no per-remote parameters are being specified, there may '\n 'be a trailing colon after the URL, which should be '\n 'removed. Check the master configuration file.', repo_url\n )\n _failhard()\n\n branch_method = \\\n per_remote_conf.get('branch_method',\n per_remote_defaults['branch_method'])\n if branch_method not in VALID_BRANCH_METHODS:\n log.error(\n 'Invalid branch_method \\'%s\\' for remote %s. Valid '\n 'branch methods are: %s. This remote will be ignored.',\n branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS)\n )\n _failhard()\n\n per_remote_errors = False\n for param in (x for x in per_remote_conf\n if x not in PER_REMOTE_OVERRIDES):\n log.error(\n 'Invalid configuration parameter \\'%s\\' for remote %s. '\n 'Valid parameters are: %s. See the documentation for '\n 'further information.',\n param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)\n )\n per_remote_errors = True\n if per_remote_errors:\n _failhard()\n\n repo_conf.update(per_remote_conf)\n else:\n repo_url = remote\n\n if not isinstance(repo_url, six.string_types):\n log.error(\n 'Invalid hgfs remote %s. 
Remotes must be strings, you may '\n 'need to enclose the URL in quotes', repo_url\n )\n _failhard()\n\n try:\n repo_conf['mountpoint'] = salt.utils.url.strip_proto(\n repo_conf['mountpoint']\n )\n except TypeError:\n # mountpoint not specified\n pass\n\n hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))\n repo_hash = hash_type(repo_url).hexdigest()\n rp_ = os.path.join(bp_, repo_hash)\n if not os.path.isdir(rp_):\n os.makedirs(rp_)\n\n if not os.listdir(rp_):\n # Only init if the directory is empty.\n hglib.init(rp_)\n new_remote = True\n try:\n repo = hglib.open(rp_)\n except hglib.error.ServerError:\n log.error(\n 'Cache path %s (corresponding remote: %s) exists but is not '\n 'a valid mercurial repository. You will need to manually '\n 'delete this directory on the master to continue to use this '\n 'hgfs remote.', rp_, repo_url\n )\n _failhard()\n except Exception as exc:\n log.error(\n 'Exception \\'%s\\' encountered while initializing hgfs '\n 'remote %s', exc, repo_url\n )\n _failhard()\n\n try:\n refs = repo.config(names='paths')\n except hglib.error.CommandError:\n refs = None\n\n # Do NOT put this if statement inside the except block above. 
Earlier\n # versions of hglib did not raise an exception, so we need to do it\n # this way to support both older and newer hglib.\n if not refs:\n # Write an hgrc defining the remote URL\n hgconfpath = os.path.join(rp_, '.hg', 'hgrc')\n with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig:\n hgconfig.write('[paths]\\n')\n hgconfig.write(\n salt.utils.stringutils.to_str(\n 'default = {0}\\n'.format(repo_url)\n )\n )\n\n repo_conf.update({\n 'repo': repo,\n 'url': repo_url,\n 'hash': repo_hash,\n 'cachedir': rp_,\n 'lockfile': os.path.join(__opts__['cachedir'],\n 'hgfs',\n '{0}.update.lk'.format(repo_hash))\n })\n repos.append(repo_conf)\n repo.close()\n\n if new_remote:\n remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')\n try:\n with salt.utils.files.fopen(remote_map, 'w+') as fp_:\n timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')\n fp_.write('# hgfs_remote map as of {0}\\n'.format(timestamp))\n for repo in repos:\n fp_.write(\n salt.utils.stringutils.to_str(\n '{0} = {1}\\n'.format(repo['hash'], repo['url'])\n )\n )\n except OSError:\n pass\n else:\n log.info('Wrote new hgfs_remote map to %s', remote_map)\n\n return repos\n", "def check_env_cache(opts, env_cache):\n '''\n Returns cached env names, if present. 
Otherwise returns None.\n '''\n if not os.path.isfile(env_cache):\n return None\n try:\n with salt.utils.files.fopen(env_cache, 'rb') as fp_:\n log.trace('Returning env cache data from %s', env_cache)\n serial = salt.payload.Serial(opts)\n return salt.utils.data.decode(serial.load(fp_))\n except (IOError, OSError):\n pass\n return None\n", "def _all_branches(repo):\n '''\n Returns all branches for the specified repo\n '''\n # repo.branches() returns a list of 3-tuples consisting of\n # (branch name, rev #, nodeid)\n # Example: [('default', 4, '7c96229269fa')]\n return repo.branches()\n", "def _all_bookmarks(repo):\n '''\n Returns all bookmarks for the specified repo\n '''\n # repo.bookmarks() returns a tuple containing the following:\n # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid)\n # 2. The index of the current bookmark (-1 if no current one)\n # Example: ([('mymark', 4, '7c96229269fa')], -1)\n return repo.bookmarks()[0]\n", "def _all_tags(repo):\n '''\n Returns all tags for the specified repo\n '''\n # repo.tags() returns a list of 4-tuples consisting of\n # (tag name, rev #, nodeid, islocal)\n # Example: [('1.0', 3, '3be15e71b31a', False),\n # ('tip', 4, '7c96229269fa', False)]\n # Avoid returning the special 'tip' tag.\n return [x for x in repo.tags() if x[0] != 'tip']\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and 
os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
find_file
python
def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 
'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd
Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L639-L742
[ "def init():\n '''\n Return a list of hglib objects for the various hgfs remotes\n '''\n bp_ = os.path.join(__opts__['cachedir'], 'hgfs')\n new_remote = False\n repos = []\n\n per_remote_defaults = {}\n for param in PER_REMOTE_OVERRIDES:\n per_remote_defaults[param] = \\\n six.text_type(__opts__['hgfs_{0}'.format(param)])\n\n for remote in __opts__['hgfs_remotes']:\n repo_conf = copy.deepcopy(per_remote_defaults)\n if isinstance(remote, dict):\n repo_url = next(iter(remote))\n per_remote_conf = dict(\n [(key, six.text_type(val)) for key, val in\n six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))]\n )\n if not per_remote_conf:\n log.error(\n 'Invalid per-remote configuration for hgfs remote %s. If '\n 'no per-remote parameters are being specified, there may '\n 'be a trailing colon after the URL, which should be '\n 'removed. Check the master configuration file.', repo_url\n )\n _failhard()\n\n branch_method = \\\n per_remote_conf.get('branch_method',\n per_remote_defaults['branch_method'])\n if branch_method not in VALID_BRANCH_METHODS:\n log.error(\n 'Invalid branch_method \\'%s\\' for remote %s. Valid '\n 'branch methods are: %s. This remote will be ignored.',\n branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS)\n )\n _failhard()\n\n per_remote_errors = False\n for param in (x for x in per_remote_conf\n if x not in PER_REMOTE_OVERRIDES):\n log.error(\n 'Invalid configuration parameter \\'%s\\' for remote %s. '\n 'Valid parameters are: %s. See the documentation for '\n 'further information.',\n param, repo_url, ', '.join(PER_REMOTE_OVERRIDES)\n )\n per_remote_errors = True\n if per_remote_errors:\n _failhard()\n\n repo_conf.update(per_remote_conf)\n else:\n repo_url = remote\n\n if not isinstance(repo_url, six.string_types):\n log.error(\n 'Invalid hgfs remote %s. 
Remotes must be strings, you may '\n 'need to enclose the URL in quotes', repo_url\n )\n _failhard()\n\n try:\n repo_conf['mountpoint'] = salt.utils.url.strip_proto(\n repo_conf['mountpoint']\n )\n except TypeError:\n # mountpoint not specified\n pass\n\n hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5'))\n repo_hash = hash_type(repo_url).hexdigest()\n rp_ = os.path.join(bp_, repo_hash)\n if not os.path.isdir(rp_):\n os.makedirs(rp_)\n\n if not os.listdir(rp_):\n # Only init if the directory is empty.\n hglib.init(rp_)\n new_remote = True\n try:\n repo = hglib.open(rp_)\n except hglib.error.ServerError:\n log.error(\n 'Cache path %s (corresponding remote: %s) exists but is not '\n 'a valid mercurial repository. You will need to manually '\n 'delete this directory on the master to continue to use this '\n 'hgfs remote.', rp_, repo_url\n )\n _failhard()\n except Exception as exc:\n log.error(\n 'Exception \\'%s\\' encountered while initializing hgfs '\n 'remote %s', exc, repo_url\n )\n _failhard()\n\n try:\n refs = repo.config(names='paths')\n except hglib.error.CommandError:\n refs = None\n\n # Do NOT put this if statement inside the except block above. 
Earlier\n # versions of hglib did not raise an exception, so we need to do it\n # this way to support both older and newer hglib.\n if not refs:\n # Write an hgrc defining the remote URL\n hgconfpath = os.path.join(rp_, '.hg', 'hgrc')\n with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig:\n hgconfig.write('[paths]\\n')\n hgconfig.write(\n salt.utils.stringutils.to_str(\n 'default = {0}\\n'.format(repo_url)\n )\n )\n\n repo_conf.update({\n 'repo': repo,\n 'url': repo_url,\n 'hash': repo_hash,\n 'cachedir': rp_,\n 'lockfile': os.path.join(__opts__['cachedir'],\n 'hgfs',\n '{0}.update.lk'.format(repo_hash))\n })\n repos.append(repo_conf)\n repo.close()\n\n if new_remote:\n remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt')\n try:\n with salt.utils.files.fopen(remote_map, 'w+') as fp_:\n timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f')\n fp_.write('# hgfs_remote map as of {0}\\n'.format(timestamp))\n for repo in repos:\n fp_.write(\n salt.utils.stringutils.to_str(\n '{0} = {1}\\n'.format(repo['hash'], repo['url'])\n )\n )\n except OSError:\n pass\n else:\n log.info('Wrote new hgfs_remote map to %s', remote_map)\n\n return repos\n", "def envs(ignore_cache=False):\n '''\n Return a list of refs that can be used as environments\n '''\n if not ignore_cache:\n env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p')\n cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)\n if cache_match is not None:\n return cache_match\n ret = set()\n for repo in init():\n repo['repo'].open()\n if repo['branch_method'] in ('branches', 'mixed'):\n for branch in _all_branches(repo['repo']):\n branch_name = branch[0]\n if branch_name == repo['base']:\n branch_name = 'base'\n ret.add(branch_name)\n if repo['branch_method'] in ('bookmarks', 'mixed'):\n for bookmark in _all_bookmarks(repo['repo']):\n bookmark_name = bookmark[0]\n if bookmark_name == repo['base']:\n bookmark_name = 'base'\n ret.add(bookmark_name)\n ret.update([x[0] for x in 
_all_tags(repo['repo'])])\n repo['repo'].close()\n return [x for x in sorted(ret) if _env_is_exposed(x)]\n", "def wait_lock(lk_fn, dest, wait_timeout=0):\n '''\n If the write lock is there, check to see if the file is actually being\n written. If there is no change in the file size after a short sleep,\n remove the lock and move forward.\n '''\n if not os.path.exists(lk_fn):\n return False\n if not os.path.exists(dest):\n # The dest is not here, sleep for a bit, if the dest is not here yet\n # kill the lockfile and start the write\n time.sleep(1)\n if not os.path.isfile(dest):\n _unlock_cache(lk_fn)\n return False\n timeout = None\n if wait_timeout:\n timeout = time.time() + wait_timeout\n # There is a lock file, the dest is there, stat the dest, sleep and check\n # that the dest is being written, if it is not being written kill the lock\n # file and continue. Also check if the lock file is gone.\n s_count = 0\n s_size = os.stat(dest).st_size\n while True:\n time.sleep(1)\n if not os.path.exists(lk_fn):\n return False\n size = os.stat(dest).st_size\n if size == s_size:\n s_count += 1\n if s_count >= 3:\n # The file is not being written to, kill the lock and proceed\n _unlock_cache(lk_fn)\n return False\n else:\n s_size = size\n if timeout:\n if time.time() > timeout:\n raise ValueError(\n 'Timeout({0}s) for {1} (lock: {2}) elapsed'.format(\n wait_timeout, dest, lk_fn\n )\n )\n return False\n", "def _get_ref(repo, name):\n '''\n Return ref tuple if ref is in the repo.\n '''\n if name == 'base':\n name = repo['base']\n if name == repo['base'] or name in envs():\n if repo['branch_method'] == 'branches':\n return _get_branch(repo['repo'], name) \\\n or _get_tag(repo['repo'], name)\n elif repo['branch_method'] == 'bookmarks':\n return _get_bookmark(repo['repo'], name) \\\n or _get_tag(repo['repo'], name)\n elif repo['branch_method'] == 'mixed':\n return _get_branch(repo['repo'], name) \\\n or _get_bookmark(repo['repo'], name) \\\n or _get_tag(repo['repo'], name)\n 
return False\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def serve_file(load, fnd): ''' Return a chunk from a file based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
file_hash
python
def file_hash(load, fnd):
    '''
    Return a file hash, the hash type is set in the master config file
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    # Bail out early unless the load carries both keys this backend needs.
    for required in ('path', 'saltenv'):
        if required not in load:
            return ''

    hash_type = __opts__['hash_type']
    ret = {'hash_type': hash_type}
    # Per-saltenv on-disk cache location for this file's hash.
    hashdest = os.path.join(
        __opts__['cachedir'],
        'hgfs/hash',
        load['saltenv'],
        '{0}.hash.{1}'.format(fnd['rel'], hash_type)
    )
    if os.path.isfile(hashdest):
        # A cached hash exists from a previous request; reuse it.
        with salt.utils.files.fopen(hashdest, 'rb') as fp_:
            ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read())
    else:
        # First request for this file: compute the hash, then cache it
        # so subsequent requests skip re-hashing.
        ret['hsum'] = salt.utils.hashutils.get_hash(fnd['path'], hash_type)
        with salt.utils.files.fopen(hashdest, 'w+') as fp_:
            fp_.write(ret['hsum'])
    return ret
Return a file hash, the hash type is set in the master config file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L774-L800
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_file_lists
python
def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return []
Return a dict containing the file lists for files and dirs
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L803-L836
[ "def check_file_list_cache(opts, form, list_cache, w_lock):\n '''\n Checks the cache file to see if there is a new enough file list cache, and\n returns the match (if found, along with booleans used by the fileserver\n backend to determine if the cache needs to be refreshed/written).\n '''\n refresh_cache = False\n save_cache = True\n serial = salt.payload.Serial(opts)\n wait_lock(w_lock, list_cache, 5 * 60)\n if not os.path.isfile(list_cache) and _lock_cache(w_lock):\n refresh_cache = True\n else:\n attempt = 0\n while attempt < 11:\n try:\n if os.path.exists(w_lock):\n # wait for a filelist lock for max 15min\n wait_lock(w_lock, list_cache, 15 * 60)\n if os.path.exists(list_cache):\n # calculate filelist age is possible\n cache_stat = os.stat(list_cache)\n # st_time can have a greater precision than time, removing\n # float precision makes sure age will never be a negative\n # number.\n current_time = int(time.time())\n file_mtime = int(cache_stat.st_mtime)\n if file_mtime > current_time:\n log.debug(\n 'Cache file modified time is in the future, ignoring. '\n 'file=%s mtime=%s current_time=%s',\n list_cache, current_time, file_mtime\n )\n age = 0\n else:\n age = current_time - file_mtime\n else:\n # if filelist does not exists yet, mark it as expired\n age = opts.get('fileserver_list_cache_time', 20) + 1\n if age < 0:\n # Cache is from the future! Warn and mark cache invalid.\n log.warning('The file list_cache was created in the future!')\n if 0 <= age < opts.get('fileserver_list_cache_time', 20):\n # Young enough! 
Load this sucker up!\n with salt.utils.files.fopen(list_cache, 'rb') as fp_:\n log.debug(\n \"Returning file list from cache: age=%s cache_time=%s %s\",\n age, opts.get('fileserver_list_cache_time', 20), list_cache\n )\n return salt.utils.data.decode(serial.load(fp_).get(form, [])), False, False\n elif _lock_cache(w_lock):\n # Set the w_lock and go\n refresh_cache = True\n break\n except Exception:\n time.sleep(0.2)\n attempt += 1\n continue\n if attempt > 10:\n save_cache = False\n refresh_cache = True\n return None, refresh_cache, save_cache\n", "def write_file_list_cache(opts, data, list_cache, w_lock):\n '''\n Checks the cache file to see if there is a new enough file list cache, and\n returns the match (if found, along with booleans used by the fileserver\n backend to determine if the cache needs to be refreshed/written).\n '''\n serial = salt.payload.Serial(opts)\n with salt.utils.files.fopen(list_cache, 'w+b') as fp_:\n fp_.write(serial.dumps(data))\n _unlock_cache(w_lock)\n log.trace('Lockfile %s removed', w_lock)\n", "def _get_file_list(load):\n '''\n Get a list of all files on the file server in a specified environment\n '''\n if 'env' in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop('env')\n\n if 'saltenv' not in load or load['saltenv'] not in envs():\n return []\n ret = set()\n for repo in init():\n repo['repo'].open()\n ref = _get_ref(repo, load['saltenv'])\n if ref:\n manifest = repo['repo'].manifest(rev=ref[1])\n for tup in manifest:\n relpath = os.path.relpath(tup[4], repo['root'])\n # Don't add files outside the hgfs_root\n if not relpath.startswith('../'):\n ret.add(os.path.join(repo['mountpoint'], relpath))\n repo['repo'].close()\n return sorted(ret)\n", "def _get_dir_list(load):\n '''\n Get a list of all directories on the master\n '''\n if 'env' in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop('env')\n\n if 'saltenv' not in load or load['saltenv'] not in envs():\n return []\n ret = set()\n for repo in 
init():\n repo['repo'].open()\n ref = _get_ref(repo, load['saltenv'])\n if ref:\n manifest = repo['repo'].manifest(rev=ref[1])\n for tup in manifest:\n filepath = tup[4]\n split = filepath.rsplit('/', 1)\n while len(split) > 1:\n relpath = os.path.relpath(split[0], repo['root'])\n # Don't add '.'\n if relpath != '.':\n # Don't add files outside the hgfs_root\n if not relpath.startswith('../'):\n ret.add(os.path.join(repo['mountpoint'], relpath))\n split = split[0].rsplit('/', 1)\n repo['repo'].close()\n if repo['mountpoint']:\n ret.add(repo['mountpoint'])\n return sorted(ret)\n" ]
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_get_file_list
python
def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret)
Get a list of all files on the file server in a specified environment
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L846-L868
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs') def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' 
if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
saltstack/salt
salt/fileserver/hgfs.py
_get_dir_list
python
def _get_dir_list(load):
    '''
    Get a list of all directories on the master
    '''
    if 'env' in load:
        # "env" is not supported; Use "saltenv".
        load.pop('env')

    saltenv = load.get('saltenv')
    if saltenv is None or saltenv not in envs():
        return []

    dirs = set()
    for remote in init():
        hg_client = remote['repo']
        hg_client.open()
        ref = _get_ref(remote, saltenv)
        if ref:
            # manifest entries are tuples; index 4 is the file path
            for entry in hg_client.manifest(rev=ref[1]):
                # Walk up through every parent directory of this file
                parts = entry[4].rsplit('/', 1)
                while len(parts) > 1:
                    rel = os.path.relpath(parts[0], remote['root'])
                    # Skip the repo root itself ('.') and anything
                    # outside the hgfs_root
                    if rel != '.' and not rel.startswith('../'):
                        dirs.add(os.path.join(remote['mountpoint'], rel))
                    parts = parts[0].rsplit('/', 1)
        hg_client.close()
        if remote['mountpoint']:
            dirs.add(remote['mountpoint'])
    return sorted(dirs)
Get a list of all directories on the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/hgfs.py#L886-L916
null
# -*- coding: utf-8 -*- ''' Mercurial Fileserver Backend To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the Master config file. .. code-block:: yaml fileserver_backend: - hgfs .. note:: ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would work. After enabling this backend, branches, bookmarks, and tags in a remote mercurial repository are exposed to salt as different environments. This feature is managed by the :conf_master:`fileserver_backend` option in the salt master config file. This fileserver has an additional option :conf_master:`hgfs_branch_method` that will set the desired branch method. Possible values are: ``branches``, ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the ``default`` branch will be mapped to ``base``. .. versionchanged:: 2014.1.0 The :conf_master:`hgfs_base` master config parameter was added, allowing for a branch other than ``default`` to be used for the ``base`` environment, and allowing for a ``base`` environment to be specified when using an :conf_master:`hgfs_branch_method` of ``bookmarks``. 
:depends: - mercurial - python bindings for mercurial (``python-hglib``) ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import errno import fnmatch import glob import hashlib import logging import os import shutil from datetime import datetime from salt.exceptions import FileserverConfigError VALID_BRANCH_METHODS = ('branches', 'bookmarks', 'mixed') PER_REMOTE_OVERRIDES = ('base', 'branch_method', 'mountpoint', 'root') # Import third party libs from salt.ext import six # pylint: disable=import-error try: import hglib HAS_HG = True except ImportError: HAS_HG = False # pylint: enable=import-error # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.gzip_util import salt.utils.hashutils import salt.utils.stringutils import salt.utils.url import salt.utils.versions import salt.fileserver from salt.utils.event import tagify log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'hg' def __virtual__(): ''' Only load if mercurial is available ''' if __virtualname__ not in __opts__['fileserver_backend']: return False if not HAS_HG: log.error('Mercurial fileserver backend is enabled in configuration ' 'but could not be loaded, is hglib installed?') return False if __opts__['hgfs_branch_method'] not in VALID_BRANCH_METHODS: log.error( 'Invalid hgfs_branch_method \'%s\'. 
Valid methods are: %s', __opts__['hgfs_branch_method'], VALID_BRANCH_METHODS ) return False return __virtualname__ def _all_branches(repo): ''' Returns all branches for the specified repo ''' # repo.branches() returns a list of 3-tuples consisting of # (branch name, rev #, nodeid) # Example: [('default', 4, '7c96229269fa')] return repo.branches() def _get_branch(repo, name): ''' Find the requested branch in the specified repo ''' try: return [x for x in _all_branches(repo) if x[0] == name][0] except IndexError: return False def _all_bookmarks(repo): ''' Returns all bookmarks for the specified repo ''' # repo.bookmarks() returns a tuple containing the following: # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid) # 2. The index of the current bookmark (-1 if no current one) # Example: ([('mymark', 4, '7c96229269fa')], -1) return repo.bookmarks()[0] def _get_bookmark(repo, name): ''' Find the requested bookmark in the specified repo ''' try: return [x for x in _all_bookmarks(repo) if x[0] == name][0] except IndexError: return False def _all_tags(repo): ''' Returns all tags for the specified repo ''' # repo.tags() returns a list of 4-tuples consisting of # (tag name, rev #, nodeid, islocal) # Example: [('1.0', 3, '3be15e71b31a', False), # ('tip', 4, '7c96229269fa', False)] # Avoid returning the special 'tip' tag. return [x for x in repo.tags() if x[0] != 'tip'] def _get_tag(repo, name): ''' Find the requested tag in the specified repo ''' try: return [x for x in _all_tags(repo) if x[0] == name][0] except IndexError: return False def _get_ref(repo, name): ''' Return ref tuple if ref is in the repo. 
''' if name == 'base': name = repo['base'] if name == repo['base'] or name in envs(): if repo['branch_method'] == 'branches': return _get_branch(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'bookmarks': return _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) elif repo['branch_method'] == 'mixed': return _get_branch(repo['repo'], name) \ or _get_bookmark(repo['repo'], name) \ or _get_tag(repo['repo'], name) return False def _failhard(): ''' Fatal fileserver configuration issue, raise an exception ''' raise FileserverConfigError( 'Failed to load hg fileserver backend' ) def init(): ''' Return a list of hglib objects for the various hgfs remotes ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') new_remote = False repos = [] per_remote_defaults = {} for param in PER_REMOTE_OVERRIDES: per_remote_defaults[param] = \ six.text_type(__opts__['hgfs_{0}'.format(param)]) for remote in __opts__['hgfs_remotes']: repo_conf = copy.deepcopy(per_remote_defaults) if isinstance(remote, dict): repo_url = next(iter(remote)) per_remote_conf = dict( [(key, six.text_type(val)) for key, val in six.iteritems(salt.utils.data.repack_dictlist(remote[repo_url]))] ) if not per_remote_conf: log.error( 'Invalid per-remote configuration for hgfs remote %s. If ' 'no per-remote parameters are being specified, there may ' 'be a trailing colon after the URL, which should be ' 'removed. Check the master configuration file.', repo_url ) _failhard() branch_method = \ per_remote_conf.get('branch_method', per_remote_defaults['branch_method']) if branch_method not in VALID_BRANCH_METHODS: log.error( 'Invalid branch_method \'%s\' for remote %s. Valid ' 'branch methods are: %s. This remote will be ignored.', branch_method, repo_url, ', '.join(VALID_BRANCH_METHODS) ) _failhard() per_remote_errors = False for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES): log.error( 'Invalid configuration parameter \'%s\' for remote %s. 
' 'Valid parameters are: %s. See the documentation for ' 'further information.', param, repo_url, ', '.join(PER_REMOTE_OVERRIDES) ) per_remote_errors = True if per_remote_errors: _failhard() repo_conf.update(per_remote_conf) else: repo_url = remote if not isinstance(repo_url, six.string_types): log.error( 'Invalid hgfs remote %s. Remotes must be strings, you may ' 'need to enclose the URL in quotes', repo_url ) _failhard() try: repo_conf['mountpoint'] = salt.utils.url.strip_proto( repo_conf['mountpoint'] ) except TypeError: # mountpoint not specified pass hash_type = getattr(hashlib, __opts__.get('hash_type', 'md5')) repo_hash = hash_type(repo_url).hexdigest() rp_ = os.path.join(bp_, repo_hash) if not os.path.isdir(rp_): os.makedirs(rp_) if not os.listdir(rp_): # Only init if the directory is empty. hglib.init(rp_) new_remote = True try: repo = hglib.open(rp_) except hglib.error.ServerError: log.error( 'Cache path %s (corresponding remote: %s) exists but is not ' 'a valid mercurial repository. You will need to manually ' 'delete this directory on the master to continue to use this ' 'hgfs remote.', rp_, repo_url ) _failhard() except Exception as exc: log.error( 'Exception \'%s\' encountered while initializing hgfs ' 'remote %s', exc, repo_url ) _failhard() try: refs = repo.config(names='paths') except hglib.error.CommandError: refs = None # Do NOT put this if statement inside the except block above. Earlier # versions of hglib did not raise an exception, so we need to do it # this way to support both older and newer hglib. 
if not refs: # Write an hgrc defining the remote URL hgconfpath = os.path.join(rp_, '.hg', 'hgrc') with salt.utils.files.fopen(hgconfpath, 'w+') as hgconfig: hgconfig.write('[paths]\n') hgconfig.write( salt.utils.stringutils.to_str( 'default = {0}\n'.format(repo_url) ) ) repo_conf.update({ 'repo': repo, 'url': repo_url, 'hash': repo_hash, 'cachedir': rp_, 'lockfile': os.path.join(__opts__['cachedir'], 'hgfs', '{0}.update.lk'.format(repo_hash)) }) repos.append(repo_conf) repo.close() if new_remote: remote_map = os.path.join(__opts__['cachedir'], 'hgfs/remote_map.txt') try: with salt.utils.files.fopen(remote_map, 'w+') as fp_: timestamp = datetime.now().strftime('%d %b %Y %H:%M:%S.%f') fp_.write('# hgfs_remote map as of {0}\n'.format(timestamp)) for repo in repos: fp_.write( salt.utils.stringutils.to_str( '{0} = {1}\n'.format(repo['hash'], repo['url']) ) ) except OSError: pass else: log.info('Wrote new hgfs_remote map to %s', remote_map) return repos def _clear_old_remotes(): ''' Remove cache directories for remotes no longer configured ''' bp_ = os.path.join(__opts__['cachedir'], 'hgfs') try: cachedir_ls = os.listdir(bp_) except OSError: cachedir_ls = [] repos = init() # Remove actively-used remotes from list for repo in repos: try: cachedir_ls.remove(repo['hash']) except ValueError: pass to_remove = [] for item in cachedir_ls: if item in ('hash', 'refs'): continue path = os.path.join(bp_, item) if os.path.isdir(path): to_remove.append(path) failed = [] if to_remove: for rdir in to_remove: try: shutil.rmtree(rdir) except OSError as exc: log.error( 'Unable to remove old hgfs remote cachedir %s: %s', rdir, exc ) failed.append(rdir) else: log.debug('hgfs removed old cachedir %s', rdir) for fdir in failed: to_remove.remove(fdir) return bool(to_remove), repos def clear_cache(): ''' Completely clear hgfs cache ''' fsb_cachedir = os.path.join(__opts__['cachedir'], 'hgfs') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') errors = [] for rdir in 
(fsb_cachedir, list_cachedir): if os.path.exists(rdir): try: shutil.rmtree(rdir) except OSError as exc: errors.append('Unable to delete {0}: {1}'.format(rdir, exc)) return errors def clear_lock(remote=None): ''' Clear update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. ''' def _do_clear_lock(repo): def _add_error(errlist, repo, exc): msg = ('Unable to remove update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) errlist.append(msg) success = [] failed = [] if os.path.exists(repo['lockfile']): try: os.remove(repo['lockfile']) except OSError as exc: if exc.errno == errno.EISDIR: # Somehow this path is a directory. Should never happen # unless some wiseguy manually creates a directory at this # path, but just in case, handle it. try: shutil.rmtree(repo['lockfile']) except OSError as exc: _add_error(failed, repo, exc) else: _add_error(failed, repo, exc) else: msg = 'Removed lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_clear_lock(remote) cleared = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_clear_lock(repo) cleared.extend(success) errors.extend(failed) return cleared, errors def lock(remote=None): ''' Place an update.lk ``remote`` can either be a dictionary containing repo configuration information, or a pattern. If the latter, then remotes for which the URL matches the pattern will be locked. 
''' def _do_lock(repo): success = [] failed = [] if not os.path.exists(repo['lockfile']): try: with salt.utils.files.fopen(repo['lockfile'], 'w'): pass except (IOError, OSError) as exc: msg = ('Unable to set update lock for {0} ({1}): {2} ' .format(repo['url'], repo['lockfile'], exc)) log.debug(msg) failed.append(msg) else: msg = 'Set lock for {0}'.format(repo['url']) log.debug(msg) success.append(msg) return success, failed if isinstance(remote, dict): return _do_lock(remote) locked = [] errors = [] for repo in init(): if remote: try: if not fnmatch.fnmatch(repo['url'], remote): continue except TypeError: # remote was non-string, try again if not fnmatch.fnmatch(repo['url'], six.text_type(remote)): continue success, failed = _do_lock(repo) locked.extend(success) errors.extend(failed) return locked, errors def update(): ''' Execute an hg pull on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'hgfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for hgfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. 
Removing %s or running ' '\'salt-run fileserver.clear_lock hgfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for hgfs remote %s, skipping.', repo['url'] ) continue log.debug('hgfs is fetching from %s', repo['url']) repo['repo'].open() curtip = repo['repo'].tip() try: repo['repo'].pull() except Exception as exc: log.error( 'Exception %s caught while updating hgfs remote %s', exc, repo['url'], exc_info_on_loglevel=logging.DEBUG ) else: newtip = repo['repo'].tip() if curtip[1] != newtip[1]: data['changed'] = True repo['repo'].close() clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['hgfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'hgfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass def _env_is_exposed(env): ''' Check if an environment is exposed by comparing it against a whitelist and blacklist. ''' if __opts__['hgfs_env_whitelist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_whitelist config option has been renamed to ' 'hgfs_saltenv_whitelist. Please update your configuration.' 
) whitelist = __opts__['hgfs_env_whitelist'] else: whitelist = __opts__['hgfs_saltenv_whitelist'] if __opts__['hgfs_env_blacklist']: salt.utils.versions.warn_until( 'Neon', 'The hgfs_env_blacklist config option has been renamed to ' 'hgfs_saltenv_blacklist. Please update your configuration.' ) blacklist = __opts__['hgfs_env_blacklist'] else: blacklist = __opts__['hgfs_saltenv_blacklist'] return salt.utils.stringutils.check_whitelist_blacklist( env, whitelist=whitelist, blacklist=blacklist, ) def envs(ignore_cache=False): ''' Return a list of refs that can be used as environments ''' if not ignore_cache: env_cache = os.path.join(__opts__['cachedir'], 'hgfs/envs.p') cache_match = salt.fileserver.check_env_cache(__opts__, env_cache) if cache_match is not None: return cache_match ret = set() for repo in init(): repo['repo'].open() if repo['branch_method'] in ('branches', 'mixed'): for branch in _all_branches(repo['repo']): branch_name = branch[0] if branch_name == repo['base']: branch_name = 'base' ret.add(branch_name) if repo['branch_method'] in ('bookmarks', 'mixed'): for bookmark in _all_bookmarks(repo['repo']): bookmark_name = bookmark[0] if bookmark_name == repo['base']: bookmark_name = 'base' ret.add(bookmark_name) ret.update([x[0] for x in _all_tags(repo['repo'])]) repo['repo'].close() return [x for x in sorted(ret) if _env_is_exposed(x)] def find_file(path, tgt_env='base', **kwargs): # pylint: disable=W0613 ''' Find the first file to match the path and ref, read the file out of hg and send the path to the newly cached file ''' fnd = {'path': '', 'rel': ''} if os.path.isabs(path) or tgt_env not in envs(): return fnd dest = os.path.join(__opts__['cachedir'], 'hgfs/refs', tgt_env, path) hashes_glob = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.*'.format(path)) blobshadest = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, '{0}.hash.blob_sha1'.format(path)) lk_fn = os.path.join(__opts__['cachedir'], 'hgfs/hash', tgt_env, 
'{0}.lk'.format(path)) destdir = os.path.dirname(dest) hashdir = os.path.dirname(blobshadest) if not os.path.isdir(destdir): try: os.makedirs(destdir) except OSError: # Path exists and is a file, remove it and retry os.remove(destdir) os.makedirs(destdir) if not os.path.isdir(hashdir): try: os.makedirs(hashdir) except OSError: # Path exists and is a file, remove it and retry os.remove(hashdir) os.makedirs(hashdir) for repo in init(): if repo['mountpoint'] \ and not path.startswith(repo['mountpoint'] + os.path.sep): continue repo_path = path[len(repo['mountpoint']):].lstrip(os.path.sep) if repo['root']: repo_path = os.path.join(repo['root'], repo_path) repo['repo'].open() ref = _get_ref(repo, tgt_env) if not ref: # Branch or tag not found in repo, try the next repo['repo'].close() continue salt.fileserver.wait_lock(lk_fn, dest) if os.path.isfile(blobshadest) and os.path.isfile(dest): with salt.utils.files.fopen(blobshadest, 'r') as fp_: sha = fp_.read() if sha == ref[2]: fnd['rel'] = path fnd['path'] = dest repo['repo'].close() return fnd try: repo['repo'].cat( ['path:{0}'.format(repo_path)], rev=ref[2], output=dest ) except hglib.error.CommandError: repo['repo'].close() continue with salt.utils.files.fopen(lk_fn, 'w'): pass for filename in glob.glob(hashes_glob): try: os.remove(filename) except Exception: pass with salt.utils.files.fopen(blobshadest, 'w+') as fp_: fp_.write(ref[2]) try: os.remove(lk_fn) except (OSError, IOError): pass fnd['rel'] = path fnd['path'] = dest try: # Converting the stat result to a list, the elements of the # list correspond to the following stat_result params: # 0 => st_mode=33188 # 1 => st_ino=10227377 # 2 => st_dev=65026 # 3 => st_nlink=1 # 4 => st_uid=1000 # 5 => st_gid=1000 # 6 => st_size=1056233 # 7 => st_atime=1468284229 # 8 => st_mtime=1456338235 # 9 => st_ctime=1456338235 fnd['stat'] = list(os.stat(dest)) except Exception: pass repo['repo'].close() return fnd return fnd def serve_file(load, fnd): ''' Return a chunk from a file 
based on the data received ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') ret = {'data': '', 'dest': ''} if not all(x in load for x in ('path', 'loc', 'saltenv')): return ret if not fnd['path']: return ret ret['dest'] = fnd['rel'] gzip = load.get('gzip', None) fpath = os.path.normpath(fnd['path']) with salt.utils.files.fopen(fpath, 'rb') as fp_: fp_.seek(load['loc']) data = fp_.read(__opts__['file_buffer_size']) if data and six.PY3 and not salt.utils.files.is_binary(fpath): data = data.decode(__salt_system_encoding__) if gzip and data: data = salt.utils.gzip_util.compress(data, gzip) ret['gzip'] = gzip ret['data'] = data return ret def file_hash(load, fnd): ''' Return a file hash, the hash type is set in the master config file ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if not all(x in load for x in ('path', 'saltenv')): return '' ret = {'hash_type': __opts__['hash_type']} relpath = fnd['rel'] path = fnd['path'] hashdest = os.path.join(__opts__['cachedir'], 'hgfs/hash', load['saltenv'], '{0}.hash.{1}'.format(relpath, __opts__['hash_type'])) if not os.path.isfile(hashdest): ret['hsum'] = salt.utils.hashutils.get_hash(path, __opts__['hash_type']) with salt.utils.files.fopen(hashdest, 'w+') as fp_: fp_.write(ret['hsum']) return ret else: with salt.utils.files.fopen(hashdest, 'rb') as fp_: ret['hsum'] = salt.utils.stringutils.to_unicode(fp_.read()) return ret def _file_lists(load, form): ''' Return a dict containing the file lists for files and dirs ''' if 'env' in load: # "env" is not supported; Use "saltenv". 
load.pop('env') list_cachedir = os.path.join(__opts__['cachedir'], 'file_lists/hgfs') if not os.path.isdir(list_cachedir): try: os.makedirs(list_cachedir) except os.error: log.critical('Unable to make cachedir %s', list_cachedir) return [] list_cache = os.path.join(list_cachedir, '{0}.p'.format(load['saltenv'])) w_lock = os.path.join(list_cachedir, '.{0}.w'.format(load['saltenv'])) cache_match, refresh_cache, save_cache = \ salt.fileserver.check_file_list_cache( __opts__, form, list_cache, w_lock ) if cache_match is not None: return cache_match if refresh_cache: ret = {} ret['files'] = _get_file_list(load) ret['dirs'] = _get_dir_list(load) if save_cache: salt.fileserver.write_file_list_cache( __opts__, ret, list_cache, w_lock ) return ret.get(form, []) # Shouldn't get here, but if we do, this prevents a TypeError return [] def file_list(load): ''' Return a list of all files on the file server in a specified environment ''' return _file_lists(load, 'files') def _get_file_list(load): ''' Get a list of all files on the file server in a specified environment ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: relpath = os.path.relpath(tup[4], repo['root']) # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) repo['repo'].close() return sorted(ret) def file_list_emptydirs(load): # pylint: disable=W0613 ''' Return a list of all empty directories on the master ''' # Cannot have empty dirs in hg return [] def dir_list(load): ''' Return a list of all directories on the master ''' return _file_lists(load, 'dirs')
saltstack/salt
salt/cache/__init__.py
factory
python
def factory(opts, **kwargs): ''' Creates and returns the cache class. If memory caching is enabled by opts MemCache class will be instantiated. If not Cache class will be returned. ''' if opts.get('memcache_expire_seconds', 0): cls = MemCache else: cls = Cache return cls(opts, **kwargs)
Creates and returns the cache class. If memory caching is enabled by opts MemCache class will be instantiated. If not Cache class will be returned.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L24-L34
null
# -*- coding: utf-8 -*- ''' Loader mechanism for caching data, with data expiration, etc. .. versionadded:: 2016.11.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import logging import time # Import Salt libs import salt.config from salt.ext import six from salt.payload import Serial from salt.utils.odict import OrderedDict import salt.loader import salt.syspaths log = logging.getLogger(__name__) class Cache(object): ''' Base caching object providing access to the modular cache subsystem. Related configuration options: :param cache: The name of the cache driver to use. This is the name of the python module of the `salt.cache` package. Default is `localfs`. :param serial: The module of `salt.serializers` package that should be used by the cache driver to store data. If a driver can't use a specific module or uses specific objects storage it can ignore this parameter. Terminology. Salt cache subsystem is organized as a tree with nodes and leafs like a filesystem. Cache consists of banks. Each bank can contain a number of keys. Each key can contain a dict or any other object serializable with `salt.payload.Serial`. I.e. any data object in the cache can be addressed by the path to the bank and the key name: bank: 'minions/alpha' key: 'data' Bank names should be formatted in a way that can be used as a directory structure. If slashes are included in the name, then they refer to a nested structure. Key name is a string identifier of a data container (like a file inside a directory) which will hold the data. 
''' def __init__(self, opts, cachedir=None, **kwargs): self.opts = opts if cachedir is None: self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs self._kwargs['cachedir'] = self.cachedir def __lazy_init(self): self._modules = salt.loader.cache(self.opts, self.serial) fun = '{0}.init_kwargs'.format(self.driver) if fun in self.modules: self._kwargs = self.modules[fun](self._kwargs) else: self._kwargs = {} @property def modules(self): if self._modules is None: self.__lazy_init() return self._modules def cache(self, bank, key, fun, loop_fun=None, **kwargs): ''' Check cache for the data. If it is there, check to see if it needs to be refreshed. If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function. ''' expire_seconds = kwargs.get('expire', 86400) # 1 day updated = self.updated(bank, key) update_cache = False if updated is None: update_cache = True else: if int(time.time()) - updated > expire_seconds: update_cache = True data = self.fetch(bank, key) if not data or update_cache is True: if loop_fun is not None: data = [] items = fun(**kwargs) for item in items: data.append(loop_fun(item)) else: data = fun(**kwargs) self.store(bank, key, data) return data def store(self, bank, key, data): ''' Store data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. 
File extensions should not be provided, as they will be added by the driver itself. :param data: The data which will be stored in the cache. This data should be in a format which can be serialized by msgpack/json/yaml/etc. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.store'.format(self.driver) return self.modules[fun](bank, key, data, **self._kwargs) def fetch(self, bank, key): ''' Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key not found. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.fetch'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def updated(self, bank, key): ''' Get the last updated epoch for the specified key :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return an int epoch time in seconds or None if the object wasn't found in cache. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.updated'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def flush(self, bank, key=None): ''' Remove the key from the cache bank with all the key content. 
If no key is specified remove the entire bank with all keys and sub-banks inside. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) def list(self, bank): ''' Lists entries stored in the specified bank. :param bank: The name of the location inside the cache which will hold the key and its associated data. :return: An iterable object containing all bank entries. Returns an empty iterator if the bank doesn't exists. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.list'.format(self.driver) return self.modules[fun](bank, **self._kwargs) def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Returns True if the specified key exists in the given bank and False if not. If key is None checks for the bank existense. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.contains'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) class MemCache(Cache): ''' Short-lived in-memory cache store keeping values on time and/or size (count) basis. 
''' # {<storage_id>: odict({<key>: [atime, data], ...}), ...} data = {} def __init__(self, opts, **kwargs): super(MemCache, self).__init__(opts, **kwargs) self.expire = opts.get('memcache_expire_seconds', 10) self.max = opts.get('memcache_max_items', 1024) self.cleanup = opts.get('memcache_full_cleanup', False) self.debug = opts.get('memcache_debug', False) if self.debug: self.call = 0 self.hit = 0 self._storage = None @classmethod def __cleanup(cls, expire): now = time.time() for storage in six.itervalues(cls.data): for key, data in list(storage.items()): if data[0] + expire < now: del storage[key] else: break def _get_storage_id(self): fun = '{0}.storage_id'.format(self.driver) if fun in self.modules: return self.modules[fun](self.kwargs) else: return self.driver @property def storage(self): if self._storage is None: storage_id = self._get_storage_id() if storage_id not in MemCache.data: MemCache.data[storage_id] = OrderedDict() self._storage = MemCache.data[storage_id] return self._storage def fetch(self, bank, key): if self.debug: self.call += 1 now = time.time() record = self.storage.pop((bank, key), None) # Have a cached value for the key if record is not None and record[0] + self.expire >= now: if self.debug: self.hit += 1 log.debug( 'MemCache stats (call/hit/rate): %s/%s/%s', self.call, self.hit, float(self.hit) / self.call ) # update atime and return record[0] = now self.storage[(bank, key)] = record return record[1] # Have no value for the key or value is expired data = super(MemCache, self).fetch(bank, key) if len(self.storage) >= self.max: if self.cleanup: MemCache.__cleanup(self.expire) if len(self.storage) >= self.max: self.storage.popitem(last=False) self.storage[(bank, key)] = [now, data] return data def store(self, bank, key, data): self.storage.pop((bank, key), None) super(MemCache, self).store(bank, key, data) if len(self.storage) >= self.max: if self.cleanup: MemCache.__cleanup(self.expire) if len(self.storage) >= self.max: 
self.storage.popitem(last=False) self.storage[(bank, key)] = [time.time(), data] def flush(self, bank, key=None): self.storage.pop((bank, key), None) super(MemCache, self).flush(bank, key)
saltstack/salt
salt/cache/__init__.py
Cache.cache
python
def cache(self, bank, key, fun, loop_fun=None, **kwargs): ''' Check cache for the data. If it is there, check to see if it needs to be refreshed. If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function. ''' expire_seconds = kwargs.get('expire', 86400) # 1 day updated = self.updated(bank, key) update_cache = False if updated is None: update_cache = True else: if int(time.time()) - updated > expire_seconds: update_cache = True data = self.fetch(bank, key) if not data or update_cache is True: if loop_fun is not None: data = [] items = fun(**kwargs) for item in items: data.append(loop_fun(item)) else: data = fun(**kwargs) self.store(bank, key, data) return data
Check cache for the data. If it is there, check to see if it needs to be refreshed. If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L96-L132
[ "def store(self, bank, key, data):\n '''\n Store data using the specified module\n\n :param bank:\n The name of the location inside the cache which will hold the key\n and its associated data.\n\n :param key:\n The name of the key (or file inside a directory) which will hold\n the data. File extensions should not be provided, as they will be\n added by the driver itself.\n\n :param data:\n The data which will be stored in the cache. This data should be\n in a format which can be serialized by msgpack/json/yaml/etc.\n\n :raises SaltCacheError:\n Raises an exception if cache driver detected an error accessing data\n in the cache backend (auth, permissions, etc).\n '''\n fun = '{0}.store'.format(self.driver)\n return self.modules[fun](bank, key, data, **self._kwargs)\n", "def fetch(self, bank, key):\n '''\n Fetch data using the specified module\n\n :param bank:\n The name of the location inside the cache which will hold the key\n and its associated data.\n\n :param key:\n The name of the key (or file inside a directory) which will hold\n the data. File extensions should not be provided, as they will be\n added by the driver itself.\n\n :return:\n Return a python object fetched from the cache or an empty dict if\n the given path or key not found.\n\n :raises SaltCacheError:\n Raises an exception if cache driver detected an error accessing data\n in the cache backend (auth, permissions, etc).\n '''\n fun = '{0}.fetch'.format(self.driver)\n return self.modules[fun](bank, key, **self._kwargs)\n", "def updated(self, bank, key):\n '''\n Get the last updated epoch for the specified key\n\n :param bank:\n The name of the location inside the cache which will hold the key\n and its associated data.\n\n :param key:\n The name of the key (or file inside a directory) which will hold\n the data. 
File extensions should not be provided, as they will be\n added by the driver itself.\n\n :return:\n Return an int epoch time in seconds or None if the object wasn't\n found in cache.\n\n :raises SaltCacheError:\n Raises an exception if cache driver detected an error accessing data\n in the cache backend (auth, permissions, etc).\n '''\n fun = '{0}.updated'.format(self.driver)\n return self.modules[fun](bank, key, **self._kwargs)\n" ]
class Cache(object): ''' Base caching object providing access to the modular cache subsystem. Related configuration options: :param cache: The name of the cache driver to use. This is the name of the python module of the `salt.cache` package. Default is `localfs`. :param serial: The module of `salt.serializers` package that should be used by the cache driver to store data. If a driver can't use a specific module or uses specific objects storage it can ignore this parameter. Terminology. Salt cache subsystem is organized as a tree with nodes and leafs like a filesystem. Cache consists of banks. Each bank can contain a number of keys. Each key can contain a dict or any other object serializable with `salt.payload.Serial`. I.e. any data object in the cache can be addressed by the path to the bank and the key name: bank: 'minions/alpha' key: 'data' Bank names should be formatted in a way that can be used as a directory structure. If slashes are included in the name, then they refer to a nested structure. Key name is a string identifier of a data container (like a file inside a directory) which will hold the data. ''' def __init__(self, opts, cachedir=None, **kwargs): self.opts = opts if cachedir is None: self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs self._kwargs['cachedir'] = self.cachedir def __lazy_init(self): self._modules = salt.loader.cache(self.opts, self.serial) fun = '{0}.init_kwargs'.format(self.driver) if fun in self.modules: self._kwargs = self.modules[fun](self._kwargs) else: self._kwargs = {} @property def modules(self): if self._modules is None: self.__lazy_init() return self._modules def store(self, bank, key, data): ''' Store data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. 
:param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :param data: The data which will be stored in the cache. This data should be in a format which can be serialized by msgpack/json/yaml/etc. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.store'.format(self.driver) return self.modules[fun](bank, key, data, **self._kwargs) def fetch(self, bank, key): ''' Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key not found. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.fetch'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def updated(self, bank, key): ''' Get the last updated epoch for the specified key :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return an int epoch time in seconds or None if the object wasn't found in cache. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). 
''' fun = '{0}.updated'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def flush(self, bank, key=None): ''' Remove the key from the cache bank with all the key content. If no key is specified remove the entire bank with all keys and sub-banks inside. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) def list(self, bank): ''' Lists entries stored in the specified bank. :param bank: The name of the location inside the cache which will hold the key and its associated data. :return: An iterable object containing all bank entries. Returns an empty iterator if the bank doesn't exists. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.list'.format(self.driver) return self.modules[fun](bank, **self._kwargs) def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Returns True if the specified key exists in the given bank and False if not. If key is None checks for the bank existense. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). 
''' fun = '{0}.contains'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs)
saltstack/salt
salt/cache/__init__.py
Cache.store
python
def store(self, bank, key, data):
    '''
    Store ``data`` in the cache under ``bank``/``key``, delegating to the
    configured driver module.

    :param bank:
        The name of the location inside the cache which will hold the key
        and its associated data.
    :param key:
        The name of the key (or file inside a directory) which will hold
        the data. File extensions should not be provided, as they will be
        added by the driver itself.
    :param data:
        The data which will be stored in the cache. This data should be in
        a format which can be serialized by msgpack/json/yaml/etc.
    :raises SaltCacheError:
        Raises an exception if cache driver detected an error accessing data
        in the cache backend (auth, permissions, etc).
    '''
    # Dispatch to the driver-specific implementation, e.g. 'localfs.store'.
    driver_fun = '%s.store' % self.driver
    return self.modules[driver_fun](bank, key, data, **self._kwargs)
Store data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :param data: The data which will be stored in the cache. This data should be in a format which can be serialized by msgpack/json/yaml/etc. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc).
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L134-L156
null
class Cache(object): ''' Base caching object providing access to the modular cache subsystem. Related configuration options: :param cache: The name of the cache driver to use. This is the name of the python module of the `salt.cache` package. Default is `localfs`. :param serial: The module of `salt.serializers` package that should be used by the cache driver to store data. If a driver can't use a specific module or uses specific objects storage it can ignore this parameter. Terminology. Salt cache subsystem is organized as a tree with nodes and leafs like a filesystem. Cache consists of banks. Each bank can contain a number of keys. Each key can contain a dict or any other object serializable with `salt.payload.Serial`. I.e. any data object in the cache can be addressed by the path to the bank and the key name: bank: 'minions/alpha' key: 'data' Bank names should be formatted in a way that can be used as a directory structure. If slashes are included in the name, then they refer to a nested structure. Key name is a string identifier of a data container (like a file inside a directory) which will hold the data. ''' def __init__(self, opts, cachedir=None, **kwargs): self.opts = opts if cachedir is None: self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs self._kwargs['cachedir'] = self.cachedir def __lazy_init(self): self._modules = salt.loader.cache(self.opts, self.serial) fun = '{0}.init_kwargs'.format(self.driver) if fun in self.modules: self._kwargs = self.modules[fun](self._kwargs) else: self._kwargs = {} @property def modules(self): if self._modules is None: self.__lazy_init() return self._modules def cache(self, bank, key, fun, loop_fun=None, **kwargs): ''' Check cache for the data. If it is there, check to see if it needs to be refreshed. 
If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function. ''' expire_seconds = kwargs.get('expire', 86400) # 1 day updated = self.updated(bank, key) update_cache = False if updated is None: update_cache = True else: if int(time.time()) - updated > expire_seconds: update_cache = True data = self.fetch(bank, key) if not data or update_cache is True: if loop_fun is not None: data = [] items = fun(**kwargs) for item in items: data.append(loop_fun(item)) else: data = fun(**kwargs) self.store(bank, key, data) return data def fetch(self, bank, key): ''' Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key not found. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.fetch'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def updated(self, bank, key): ''' Get the last updated epoch for the specified key :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. 
:return: Return an int epoch time in seconds or None if the object wasn't found in cache. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.updated'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def flush(self, bank, key=None): ''' Remove the key from the cache bank with all the key content. If no key is specified remove the entire bank with all keys and sub-banks inside. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) def list(self, bank): ''' Lists entries stored in the specified bank. :param bank: The name of the location inside the cache which will hold the key and its associated data. :return: An iterable object containing all bank entries. Returns an empty iterator if the bank doesn't exists. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.list'.format(self.driver) return self.modules[fun](bank, **self._kwargs) def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Returns True if the specified key exists in the given bank and False if not. 
If key is None checks for the bank existense. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.contains'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs)
saltstack/salt
salt/cache/__init__.py
Cache.fetch
python
def fetch(self, bank, key):
    '''
    Fetch data from the cache, delegating to the configured driver module.

    :param bank:
        The name of the location inside the cache which will hold the key
        and its associated data.
    :param key:
        The name of the key (or file inside a directory) which will hold
        the data. File extensions should not be provided, as they will be
        added by the driver itself.
    :return:
        Return a python object fetched from the cache or an empty dict if
        the given path or key not found.
    :raises SaltCacheError:
        Raises an exception if cache driver detected an error accessing data
        in the cache backend (auth, permissions, etc).
    '''
    # Dispatch to the driver-specific implementation, e.g. 'localfs.fetch'.
    driver_fun = '%s.fetch' % self.driver
    return self.modules[driver_fun](bank, key, **self._kwargs)
Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return a python object fetched from the cache or an empty dict if the given path or key not found. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc).
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L158-L180
null
class Cache(object): ''' Base caching object providing access to the modular cache subsystem. Related configuration options: :param cache: The name of the cache driver to use. This is the name of the python module of the `salt.cache` package. Default is `localfs`. :param serial: The module of `salt.serializers` package that should be used by the cache driver to store data. If a driver can't use a specific module or uses specific objects storage it can ignore this parameter. Terminology. Salt cache subsystem is organized as a tree with nodes and leafs like a filesystem. Cache consists of banks. Each bank can contain a number of keys. Each key can contain a dict or any other object serializable with `salt.payload.Serial`. I.e. any data object in the cache can be addressed by the path to the bank and the key name: bank: 'minions/alpha' key: 'data' Bank names should be formatted in a way that can be used as a directory structure. If slashes are included in the name, then they refer to a nested structure. Key name is a string identifier of a data container (like a file inside a directory) which will hold the data. ''' def __init__(self, opts, cachedir=None, **kwargs): self.opts = opts if cachedir is None: self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs self._kwargs['cachedir'] = self.cachedir def __lazy_init(self): self._modules = salt.loader.cache(self.opts, self.serial) fun = '{0}.init_kwargs'.format(self.driver) if fun in self.modules: self._kwargs = self.modules[fun](self._kwargs) else: self._kwargs = {} @property def modules(self): if self._modules is None: self.__lazy_init() return self._modules def cache(self, bank, key, fun, loop_fun=None, **kwargs): ''' Check cache for the data. If it is there, check to see if it needs to be refreshed. 
If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function. ''' expire_seconds = kwargs.get('expire', 86400) # 1 day updated = self.updated(bank, key) update_cache = False if updated is None: update_cache = True else: if int(time.time()) - updated > expire_seconds: update_cache = True data = self.fetch(bank, key) if not data or update_cache is True: if loop_fun is not None: data = [] items = fun(**kwargs) for item in items: data.append(loop_fun(item)) else: data = fun(**kwargs) self.store(bank, key, data) return data def store(self, bank, key, data): ''' Store data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :param data: The data which will be stored in the cache. This data should be in a format which can be serialized by msgpack/json/yaml/etc. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.store'.format(self.driver) return self.modules[fun](bank, key, data, **self._kwargs) def updated(self, bank, key): ''' Get the last updated epoch for the specified key :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. 
:return: Return an int epoch time in seconds or None if the object wasn't found in cache. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.updated'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def flush(self, bank, key=None): ''' Remove the key from the cache bank with all the key content. If no key is specified remove the entire bank with all keys and sub-banks inside. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) def list(self, bank): ''' Lists entries stored in the specified bank. :param bank: The name of the location inside the cache which will hold the key and its associated data. :return: An iterable object containing all bank entries. Returns an empty iterator if the bank doesn't exists. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.list'.format(self.driver) return self.modules[fun](bank, **self._kwargs) def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Returns True if the specified key exists in the given bank and False if not. 
If key is None checks for the bank existense. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.contains'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs)
saltstack/salt
salt/cache/__init__.py
Cache.list
python
def list(self, bank):
    '''
    Lists entries stored in the specified bank.

    :param bank:
        The name of the location inside the cache which will hold the key
        and its associated data.
    :return:
        An iterable object containing all bank entries. Returns an empty
        iterator if the bank doesn't exists.
    :raises SaltCacheError:
        Raises an exception if cache driver detected an error accessing data
        in the cache backend (auth, permissions, etc).
    '''
    # Dispatch to the driver-specific implementation, e.g. 'localfs.list'.
    driver_fun = '%s.list' % self.driver
    return self.modules[driver_fun](bank, **self._kwargs)
Lists entries stored in the specified bank. :param bank: The name of the location inside the cache which will hold the key and its associated data. :return: An iterable object containing all bank entries. Returns an empty iterator if the bank doesn't exists. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc).
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/__init__.py#L227-L244
null
class Cache(object): ''' Base caching object providing access to the modular cache subsystem. Related configuration options: :param cache: The name of the cache driver to use. This is the name of the python module of the `salt.cache` package. Default is `localfs`. :param serial: The module of `salt.serializers` package that should be used by the cache driver to store data. If a driver can't use a specific module or uses specific objects storage it can ignore this parameter. Terminology. Salt cache subsystem is organized as a tree with nodes and leafs like a filesystem. Cache consists of banks. Each bank can contain a number of keys. Each key can contain a dict or any other object serializable with `salt.payload.Serial`. I.e. any data object in the cache can be addressed by the path to the bank and the key name: bank: 'minions/alpha' key: 'data' Bank names should be formatted in a way that can be used as a directory structure. If slashes are included in the name, then they refer to a nested structure. Key name is a string identifier of a data container (like a file inside a directory) which will hold the data. ''' def __init__(self, opts, cachedir=None, **kwargs): self.opts = opts if cachedir is None: self.cachedir = opts.get('cachedir', salt.syspaths.CACHE_DIR) else: self.cachedir = cachedir self.driver = opts.get('cache', salt.config.DEFAULT_MASTER_OPTS['cache']) self.serial = Serial(opts) self._modules = None self._kwargs = kwargs self._kwargs['cachedir'] = self.cachedir def __lazy_init(self): self._modules = salt.loader.cache(self.opts, self.serial) fun = '{0}.init_kwargs'.format(self.driver) if fun in self.modules: self._kwargs = self.modules[fun](self._kwargs) else: self._kwargs = {} @property def modules(self): if self._modules is None: self.__lazy_init() return self._modules def cache(self, bank, key, fun, loop_fun=None, **kwargs): ''' Check cache for the data. If it is there, check to see if it needs to be refreshed. 
If the data is not there, or it needs to be refreshed, then call the callback function (``fun``) with any given ``**kwargs``. In some cases, the callback function returns a list of objects which need to be processed by a second function. If that is the case, then the second function is passed in as ``loop_fun``. Each item in the return list from the first function will be the only argument for the second function. ''' expire_seconds = kwargs.get('expire', 86400) # 1 day updated = self.updated(bank, key) update_cache = False if updated is None: update_cache = True else: if int(time.time()) - updated > expire_seconds: update_cache = True data = self.fetch(bank, key) if not data or update_cache is True: if loop_fun is not None: data = [] items = fun(**kwargs) for item in items: data.append(loop_fun(item)) else: data = fun(**kwargs) self.store(bank, key, data) return data def store(self, bank, key, data): ''' Store data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :param data: The data which will be stored in the cache. This data should be in a format which can be serialized by msgpack/json/yaml/etc. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.store'.format(self.driver) return self.modules[fun](bank, key, data, **self._kwargs) def fetch(self, bank, key): ''' Fetch data using the specified module :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. 
:return: Return a python object fetched from the cache or an empty dict if the given path or key not found. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.fetch'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def updated(self, bank, key): ''' Get the last updated epoch for the specified key :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Return an int epoch time in seconds or None if the object wasn't found in cache. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.updated'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs) def flush(self, bank, key=None): ''' Remove the key from the cache bank with all the key content. If no key is specified remove the entire bank with all keys and sub-banks inside. :param bank: The name of the location inside the cache which will hold the key and its associated data. :param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.flush'.format(self.driver) return self.modules[fun](bank, key=key, **self._kwargs) def contains(self, bank, key=None): ''' Checks if the specified bank contains the specified key. :param bank: The name of the location inside the cache which will hold the key and its associated data. 
:param key: The name of the key (or file inside a directory) which will hold the data. File extensions should not be provided, as they will be added by the driver itself. :return: Returns True if the specified key exists in the given bank and False if not. If key is None checks for the bank existense. :raises SaltCacheError: Raises an exception if cache driver detected an error accessing data in the cache backend (auth, permissions, etc). ''' fun = '{0}.contains'.format(self.driver) return self.modules[fun](bank, key, **self._kwargs)
saltstack/salt
salt/wheel/error.py
error
python
def error(name=None, message=''):
    '''
    If name is None Then return empty dict
    Otherwise raise an exception with __name__ from name, message from message

    CLI Example:

    .. code-block:: bash

        salt-wheel error
        salt-wheel error.error name="Exception" message="This is an error."
    '''
    if name is None:
        # No exception requested; report success with an empty result.
        return {}
    # Delegates exception construction/raising to the shared error helper.
    salt.utils.error.raise_error(name=name, message=message)
    return {}
If name is None Then return empty dict Otherwise raise an exception with __name__ from name, message from message CLI Example: .. code-block:: bash salt-wheel error salt-wheel error.error name="Exception" message="This is an error."
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/wheel/error.py#L15-L31
[ "def raise_error(name=None, args=None, message=''):\n '''\n Raise an exception with __name__ from name, args from args\n If args is None Otherwise message from message\\\n If name is empty then use \"Exception\"\n '''\n name = name or 'Exception'\n if hasattr(salt.exceptions, name):\n ex = getattr(salt.exceptions, name)\n elif hasattr(exceptions, name):\n ex = getattr(exceptions, name)\n else:\n name = 'SaltException'\n ex = getattr(salt.exceptions, name)\n if args is not None:\n raise ex(*args)\n else:\n raise ex(message)\n" ]
# -*- coding: utf-8 -*- ''' Error generator to enable integration testing of salt wheel error handling ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs # Import salt libs import salt.utils.error
saltstack/salt
salt/states/boto_asg.py
present
python
def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. 
Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. Each key is the name of scheduled action and each value is dictionary of options. For example: .. 
code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret
Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. 
code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. 
For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L217-L709
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def _determine_termination_policies(termination_policies, termination_policies_from_pillar):\n '''\n helper method for present. 
ensure that termination_policies are set\n '''\n pillar_termination_policies = copy.deepcopy(\n __salt__['config.option'](termination_policies_from_pillar, [])\n )\n if not termination_policies and pillar_termination_policies:\n termination_policies = pillar_termination_policies\n return termination_policies\n", "def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar):\n '''\n helper method for present. ensure that scaling_policies are set\n '''\n pillar_scaling_policies = copy.deepcopy(\n __salt__['config.option'](scaling_policies_from_pillar, {})\n )\n if not scaling_policies and pillar_scaling_policies:\n scaling_policies = pillar_scaling_policies\n return scaling_policies\n", "def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar):\n '''\n helper method for present, ensure scheduled actions are setup\n '''\n tmp = copy.deepcopy(\n __salt__['config.option'](scheduled_actions_from_pillar, {})\n )\n # merge with data from state\n if scheduled_actions:\n tmp = dictupdate.update(tmp, scheduled_actions)\n return tmp\n", "def _determine_notification_info(notification_arn,\n notification_arn_from_pillar,\n notification_types,\n notification_types_from_pillar):\n '''\n helper method for present. ensure that notification_configs are set\n '''\n pillar_arn_list = copy.deepcopy(\n __salt__['config.option'](notification_arn_from_pillar, {})\n )\n pillar_arn = None\n if pillar_arn_list:\n pillar_arn = pillar_arn_list[0]\n pillar_notification_types = copy.deepcopy(\n __salt__['config.option'](notification_types_from_pillar, {})\n )\n arn = notification_arn if notification_arn else pillar_arn\n types = notification_types if notification_types else pillar_notification_types\n return (arn, types)\n", "def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile):\n '''\n helper method for present. 
ensure that cloudwatch_alarms are set\n '''\n # load data from alarms_from_pillar\n tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {}))\n # merge with data from alarms\n if alarms:\n tmp = dictupdate.update(tmp, alarms)\n # set alarms, using boto_cloudwatch_alarm.present\n merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}}\n for _, info in six.iteritems(tmp):\n # add asg to name and description\n info['name'] = name + ' ' + info['name']\n info['attributes']['description'] = name + ' ' + info['attributes']['description']\n # add dimension attribute\n if 'dimensions' not in info['attributes']:\n info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]}\n scaling_policy_actions_only = True\n # replace \":self:\" with our name\n for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']:\n if action_type in info['attributes']:\n new_actions = []\n for action in info['attributes'][action_type]:\n if 'scaling_policy' not in action:\n scaling_policy_actions_only = False\n if ':self:' in action:\n action = action.replace(':self:', ':{0}:'.format(name))\n new_actions.append(action)\n info['attributes'][action_type] = new_actions\n # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG\n if scaling_policy_actions_only and min_size_equals_max_size:\n continue\n # set alarm\n kwargs = {\n 'name': info['name'],\n 'attributes': info['attributes'],\n 'region': region,\n 'key': key,\n 'keyid': keyid,\n 'profile': profile,\n }\n results = __states__['boto_cloudwatch_alarm.present'](**kwargs)\n if not results['result']:\n merged_return_value['result'] = False\n if results.get('changes', {}) != {}:\n merged_return_value['changes'][info['name']] = results['changes']\n if 'comment' in results:\n merged_return_value['comment'] += results['comment']\n return merged_return_value\n" ]
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. 
ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. 
ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. 
force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
_determine_termination_policies
python
def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies
helper method for present. ensure that termination_policies are set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L712-L721
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. 
ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. 
force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
_determine_scaling_policies
python
def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies
helper method for present. ensure that scaling_policies are set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L724-L733
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. 
ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. 
force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
_determine_scheduled_actions
python
def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp
helper method for present, ensure scheduled actions are setup
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L736-L746
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. 
ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. 
force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
_determine_notification_info
python
def _determine_notification_info(notification_arn, notification_arn_from_pillar,
                                 notification_types, notification_types_from_pillar):
    '''
    helper method for present. ensure that notification_configs are set

    Resolve the SNS notification ARN and the list of notification event
    types, preferring values passed explicitly to the state over values
    pulled from pillar.

    notification_arn
        Explicit ARN; wins over the pillar value when truthy.
    notification_arn_from_pillar
        Name of a pillar key holding a *list* of ARNs; only the first
        entry is used.
    notification_types
        Explicit list of autoscaling event names; wins over pillar.
    notification_types_from_pillar
        Name of a pillar key holding the list of event names.

    Returns a ``(arn, types)`` tuple.
    '''
    # The pillar value is consumed as a list (we index element 0 below),
    # so default to [] rather than {} for type consistency. deepcopy
    # guards against mutating the cached pillar/config data.
    pillar_arn_list = copy.deepcopy(
        __salt__['config.option'](notification_arn_from_pillar, [])
    )
    pillar_arn = None
    if pillar_arn_list:
        # Only a single ARN is supported; take the first entry.
        pillar_arn = pillar_arn_list[0]
    pillar_notification_types = copy.deepcopy(
        __salt__['config.option'](notification_types_from_pillar, {})
    )
    # Explicit state arguments take precedence over pillar-sourced values.
    arn = notification_arn if notification_arn else pillar_arn
    types = notification_types if notification_types else pillar_notification_types
    return (arn, types)
helper method for present. ensure that notification_configs are set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L749-L767
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. 
ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. 
force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
_alarms_present
python
def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value
helper method for present. ensure that cloudwatch_alarms are set
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L770-L819
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. 
ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' 
ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
saltstack/salt
salt/states/boto_asg.py
absent
python
def absent( name, force=False, region=None, key=None, keyid=None, profile=None, remove_lc=False): ''' Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif asg: if __opts__['test']: ret['comment'] = 'Autoscale group set to be deleted.' ret['result'] = None if remove_lc: msg = 'Launch configuration {0} is set to be deleted.'.format(asg['launch_config_name']) ret['comment'] = ' '.join([ret['comment'], msg]) return ret deleted = __salt__['boto_asg.delete'](name, force, region, key, keyid, profile) if deleted: if remove_lc: lc_deleted = __salt__['boto_asg.delete_launch_configuration'](asg['launch_config_name'], region, key, keyid, profile) if lc_deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to delete launch configuration.']) ret['changes']['old'] = asg ret['changes']['new'] = None ret['comment'] = 'Deleted autoscale group.' else: ret['result'] = False ret['comment'] = 'Failed to delete autoscale group.' else: ret['comment'] = 'Autoscale group does not exist.' return ret
Ensure the named autoscale group is deleted. name Name of the autoscale group. force Force deletion of autoscale group. remove_lc Delete the launch config as well. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_asg.py#L822-L892
null
# -*- coding: utf-8 -*- ''' Manage Autoscale Groups ======================= .. versionadded:: 2014.7.0 Create and destroy autoscale groups. Be aware that this interacts with Amazon's services, and so may incur charges. This module uses boto, which can be installed via package, or pip. This module accepts explicit autoscale credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More Information available at: .. code-block:: text http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html If IAM roles are not used you need to specify them either in a pillar or in the minion's config file: .. code-block:: yaml asg.keyid: GKTADJGHEIQSXMKKRBJ08H asg.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify key, keyid and region via a profile, either as a passed in dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - suspended_processes: - AddToLoadBalancer - AlarmNotification - scaling_policies - adjustment_type: ChangeInCapacity - as_name: api-production-iad - cooldown: 1800 - min_adjustment_step: None - name: ScaleDown - scaling_adjustment: -1 - region: us-east-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs # Using a profile from pillars. Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile # Passing in a profile. 
Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 # Deleting an autoscale group with running instances. Ensure myasg is deleted: boto_asg.absent: - name: myasg # If instances exist, we must force the deletion of the asg. - force: True It's possible to specify cloudwatch alarms that will be setup along with the ASG. Note the alarm name will be the name attribute defined, plus the ASG resource name. .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] You can also use alarms from pillars, and override values from the pillar alarms by setting overrides on the resource. Note that 'boto_asg_alarms' will be used as a default value for all resources, if defined and can be used to ensure alarms are always set for an ASG resource. Setting the alarms in a pillar: .. 
code-block:: yaml my_asg_alarm: CPU: name: 'ASG CPU **MANAGED BY SALT**' attributes: metric: CPUUtilization namespace: AWS/EC2 statistic: Average comparison: '>=' threshold: 65.0 period: 60 evaluation_periods: 30 unit: null description: 'ASG CPU' alarm_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] insufficient_data_actions: [] ok_actions: [ 'arn:aws:sns:us-east-1:12345:myalarm' ] Overriding the alarm values on the resource: .. code-block:: yaml Ensure myasg exists: boto_asg.present: - name: myasg - launch_config_name: mylc - availability_zones: - us-east-1a - us-east-1b - min_size: 1 - max_size: 1 - desired_capacity: 1 - load_balancers: - myelb - profile: myprofile - alarms_from_pillar: my_asg_alarm # override CPU:attributes:threshold - alarms: CPU: attributes: threshold: 50.0 ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import hashlib import logging import copy # Import Salt libs import salt.utils.dictupdate as dictupdate import salt.utils.stringutils from salt.ext import six from salt.exceptions import SaltInvocationError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' return 'boto_asg' if 'boto_asg.exists' in __salt__ else False def present( name, launch_config_name, availability_zones, min_size, max_size, launch_config=None, desired_capacity=None, load_balancers=None, default_cooldown=None, health_check_type=None, health_check_period=None, placement_group=None, vpc_zone_identifier=None, subnet_names=None, tags=None, termination_policies=None, termination_policies_from_pillar='boto_asg_termination_policies', suspended_processes=None, scaling_policies=None, scaling_policies_from_pillar='boto_asg_scaling_policies', scheduled_actions=None, scheduled_actions_from_pillar='boto_asg_scheduled_actions', alarms=None, alarms_from_pillar='boto_asg_alarms', region=None, key=None, keyid=None, profile=None, notification_arn=None, notification_arn_from_pillar='boto_asg_notification_arn', notification_types=None, notification_types_from_pillar='boto_asg_notification_types'): ''' Ensure the autoscale group exists. name Name of the autoscale group. launch_config_name Name of the launch config to use for the group. Or, if ``launch_config`` is specified, this will be the launch config name's prefix. (see below) launch_config A dictionary of launch config attributes. If specified, a launch config will be used or created, matching this set of attributes, and the autoscale group will be set to use that launch config. The launch config name will be the ``launch_config_name`` followed by a hyphen followed by a hash of the ``launch_config`` dict contents. Example: .. code-block:: yaml my_asg: boto_asg.present: - launch_config: - ebs_optimized: false - instance_profile_name: my_iam_profile - kernel_id: '' - ramdisk_id: '' - key_name: my_ssh_key - image_name: aws2015091-hvm - instance_type: c3.xlarge - instance_monitoring: false - security_groups: - my_sec_group_01 - my_sec_group_02 availability_zones List of availability zones for the group. min_size Minimum size of the group. max_size Maximum size of the group. 
desired_capacity The desired capacity of the group. load_balancers List of load balancers for the group. Once set this can not be updated (Amazon restriction). default_cooldown Number of seconds after a Scaling Activity completes before any further scaling activities can start. health_check_type The service you want the health status from, Amazon EC2 or Elastic Load Balancer (EC2 or ELB). health_check_period Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health. placement_group Physical location of your cluster placement group created in Amazon EC2. Once set this can not be updated (Amazon restriction). vpc_zone_identifier A list of the subnet identifiers of the Virtual Private Cloud. subnet_names For VPC, a list of subnet names (NOT subnet IDs) to deploy into. Exclusive with vpc_zone_identifier. tags A list of tags. Example: .. code-block:: yaml - key: 'key' value: 'value' propagate_at_launch: true termination_policies A list of termination policies. Valid values are: * ``OldestInstance`` * ``NewestInstance`` * ``OldestLaunchConfiguration`` * ``ClosestToNextInstanceHour`` * ``Default`` If no value is specified, the ``Default`` value is used. termination_policies_from_pillar: name of pillar dict that contains termination policy settings. Termination policies defined for this specific state will override those from pillar. suspended_processes List of processes to be suspended. see http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html scaling_policies List of scaling policies. Each policy is a dict of key-values described by https://boto.readthedocs.io/en/latest/ref/autoscale.html#boto.ec2.autoscale.policy.ScalingPolicy scaling_policies_from_pillar: name of pillar dict that contains scaling policy settings. Scaling policies defined for this specific state will override those from pillar. scheduled_actions: a dictionary of scheduled actions. 
Each key is the name of scheduled action and each value is dictionary of options. For example: .. code-block:: yaml - scheduled_actions: scale_up_at_10: desired_capacity: 4 min_size: 3 max_size: 5 recurrence: "0 9 * * 1-5" scale_down_at_7: desired_capacity: 1 min_size: 1 max_size: 1 recurrence: "0 19 * * 1-5" scheduled_actions_from_pillar: name of pillar dict that contains scheduled_actions settings. Scheduled actions for this specific state will override those from pillar. alarms: a dictionary of name->boto_cloudwatch_alarm sections to be associated with this ASG. All attributes should be specified except for dimension which will be automatically set to this ASG. See the :mod:`salt.states.boto_cloudwatch_alarm` state for information about these attributes. If any alarm actions include ":self:" this will be replaced with the asg name. For example, alarm_actions reading "['scaling_policy:self:ScaleUp']" will map to the arn for this asg's scaling policy named "ScaleUp". In addition, any alarms that have only scaling_policy as actions will be ignored if min_size is equal to max_size for this ASG. alarms_from_pillar: name of pillar dict that contains alarm settings. Alarms defined for this specific state will override those from pillar. region The region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. notification_arn The AWS arn that notifications will be sent to notification_arn_from_pillar name of the pillar dict that contains ``notifcation_arn`` settings. A ``notification_arn`` defined for this specific state will override the one from pillar. notification_types A list of event names that will trigger a notification. 
The list of valid notification types is: * ``autoscaling:EC2_INSTANCE_LAUNCH`` * ``autoscaling:EC2_INSTANCE_LAUNCH_ERROR`` * ``autoscaling:EC2_INSTANCE_TERMINATE`` * ``autoscaling:EC2_INSTANCE_TERMINATE_ERROR`` * ``autoscaling:TEST_NOTIFICATION`` notification_types_from_pillar name of the pillar dict that contains ``notifcation_types`` settings. ``notification_types`` defined for this specific state will override those from the pillar. ''' if vpc_zone_identifier and subnet_names: raise SaltInvocationError('vpc_zone_identifier and subnet_names are ' 'mutually exclusive options.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if subnet_names: vpc_zone_identifier = [] for i in subnet_names: r = __salt__['boto_vpc.get_resource_id']('subnet', name=i, region=region, key=key, keyid=keyid, profile=profile) if 'error' in r: ret['comment'] = 'Error looking up subnet ids: {0}'.format(r['error']) ret['result'] = False return ret if 'id' not in r: ret['comment'] = 'Subnet {0} does not exist.'.format(i) ret['result'] = False return ret vpc_zone_identifier.append(r['id']) if vpc_zone_identifier: vpc_id = __salt__['boto_vpc.get_subnet_association']( vpc_zone_identifier, region, key, keyid, profile ) vpc_id = vpc_id.get('vpc_id') log.debug('Auto Scaling Group %s is associated with VPC ID %s', name, vpc_id) else: vpc_id = None log.debug('Auto Scaling Group %s has no VPC Association', name) # if launch_config is defined, manage the launch config first. 
# hash the launch_config dict to create a unique name suffix and then # ensure it is present if launch_config: launch_config_bytes = salt.utils.stringutils.to_bytes(str(launch_config)) # future lint: disable=blacklisted-function launch_config_name = launch_config_name + '-' + hashlib.md5(launch_config_bytes).hexdigest() args = { 'name': launch_config_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } for index, item in enumerate(launch_config): if 'image_name' in item: image_name = item['image_name'] iargs = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**iargs) if image_ids: # find_images() returns False on failure launch_config[index]['image_id'] = image_ids[0] else: log.warning("Couldn't find AMI named `%s`, passing literally.", image_name) launch_config[index]['image_id'] = image_name del launch_config[index]['image_name'] break if vpc_id: log.debug('Auto Scaling Group {0} is a associated with a vpc') # locate the security groups attribute of a launch config sg_index = None for index, item in enumerate(launch_config): if 'security_groups' in item: sg_index = index break # if security groups exist within launch_config then convert # to group ids if sg_index is not None: log.debug('security group associations found in launch config') _group_ids = __salt__['boto_secgroup.convert_to_group_ids']( launch_config[sg_index]['security_groups'], vpc_id=vpc_id, region=region, key=key, keyid=keyid, profile=profile ) launch_config[sg_index]['security_groups'] = _group_ids for d in launch_config: args.update(d) if not __opts__['test']: lc_ret = __states__['boto_lc.present'](**args) if lc_ret['result'] is True and lc_ret['changes']: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config'] = lc_ret['changes'] asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) termination_policies = 
_determine_termination_policies( termination_policies, termination_policies_from_pillar ) scaling_policies = _determine_scaling_policies( scaling_policies, scaling_policies_from_pillar ) scheduled_actions = _determine_scheduled_actions( scheduled_actions, scheduled_actions_from_pillar ) if asg is None: ret['result'] = False ret['comment'] = 'Failed to check autoscale group existence.' elif not asg: if __opts__['test']: msg = 'Autoscale group set to be created.' ret['comment'] = msg ret['result'] = None return ret notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) created = __salt__['boto_asg.create'](name, launch_config_name, availability_zones, min_size, max_size, desired_capacity, load_balancers, default_cooldown, health_check_type, health_check_period, placement_group, vpc_zone_identifier, tags, termination_policies, suspended_processes, scaling_policies, scheduled_actions, region, notification_arn, notification_types, key, keyid, profile) if created: ret['changes']['old'] = None asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg else: ret['result'] = False ret['comment'] = 'Failed to create autoscale group' else: need_update = False # If any of these attributes can't be modified after creation # time, we should remove them from the dict. 
if scaling_policies: for policy in scaling_policies: if 'min_adjustment_step' not in policy: policy['min_adjustment_step'] = None if scheduled_actions: for s_name, action in six.iteritems(scheduled_actions): if 'end_time' not in action: action['end_time'] = None config = { 'launch_config_name': launch_config_name, 'availability_zones': availability_zones, 'min_size': min_size, 'max_size': max_size, 'desired_capacity': desired_capacity, 'default_cooldown': default_cooldown, 'health_check_type': health_check_type, 'health_check_period': health_check_period, 'vpc_zone_identifier': vpc_zone_identifier, 'tags': tags, 'termination_policies': termination_policies, 'suspended_processes': suspended_processes, 'scaling_policies': scaling_policies, 'scheduled_actions': scheduled_actions } #ensure that we reset termination_policies to default if none are specified if not termination_policies: config['termination_policies'] = ['Default'] if suspended_processes is None: config['suspended_processes'] = [] # ensure that we delete scaling_policies if none are specified if scaling_policies is None: config['scaling_policies'] = [] # ensure that we delete scheduled_actions if none are specified if scheduled_actions is None: config['scheduled_actions'] = {} # allow defaults on start_time for s_name, action in six.iteritems(scheduled_actions): if 'start_time' not in action: asg_action = asg['scheduled_actions'].get(s_name, {}) if 'start_time' in asg_action: del asg_action['start_time'] proposed = {} # note: do not loop using "key, value" - this can modify the value of # the aws access key for asg_property, value in six.iteritems(config): # Only modify values being specified; introspection is difficult # otherwise since it's hard to track default values, which will # always be returned from AWS. 
if value is None: continue value = __utils__['boto3.ordered'](value) if asg_property in asg: _value = __utils__['boto3.ordered'](asg[asg_property]) if not value == _value: log.debug('%s asg_property differs from %s', value, _value) proposed.setdefault('old', {}).update({asg_property: _value}) proposed.setdefault('new', {}).update({asg_property: value}) need_update = True if need_update: if __opts__['test']: msg = 'Autoscale group set to be updated.' ret['comment'] = msg ret['result'] = None ret['changes'] = proposed return ret # add in alarms notification_arn, notification_types = _determine_notification_info( notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar ) updated, msg = __salt__['boto_asg.update']( name, launch_config_name, availability_zones, min_size, max_size, desired_capacity=desired_capacity, load_balancers=load_balancers, default_cooldown=default_cooldown, health_check_type=health_check_type, health_check_period=health_check_period, placement_group=placement_group, vpc_zone_identifier=vpc_zone_identifier, tags=tags, termination_policies=termination_policies, suspended_processes=suspended_processes, scaling_policies=scaling_policies, scheduled_actions=scheduled_actions, region=region, notification_arn=notification_arn, notification_types=notification_types, key=key, keyid=keyid, profile=profile ) if asg['launch_config_name'] != launch_config_name: # delete the old launch_config_name deleted = __salt__['boto_asg.delete_launch_configuration']( asg['launch_config_name'], region=region, key=key, keyid=keyid, profile=profile ) if deleted: if 'launch_config' not in ret['changes']: ret['changes']['launch_config'] = {} ret['changes']['launch_config']['deleted'] = asg['launch_config_name'] if updated: ret['changes']['old'] = asg asg = __salt__['boto_asg.get_config'](name, region, key, keyid, profile) ret['changes']['new'] = asg ret['comment'] = 'Updated autoscale group.' 
else: ret['result'] = False ret['comment'] = msg else: ret['comment'] = 'Autoscale group present.' # add in alarms _ret = _alarms_present( name, min_size == max_size, alarms, alarms_from_pillar, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _determine_termination_policies(termination_policies, termination_policies_from_pillar): ''' helper method for present. ensure that termination_policies are set ''' pillar_termination_policies = copy.deepcopy( __salt__['config.option'](termination_policies_from_pillar, []) ) if not termination_policies and pillar_termination_policies: termination_policies = pillar_termination_policies return termination_policies def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar): ''' helper method for present. ensure that scaling_policies are set ''' pillar_scaling_policies = copy.deepcopy( __salt__['config.option'](scaling_policies_from_pillar, {}) ) if not scaling_policies and pillar_scaling_policies: scaling_policies = pillar_scaling_policies return scaling_policies def _determine_scheduled_actions(scheduled_actions, scheduled_actions_from_pillar): ''' helper method for present, ensure scheduled actions are setup ''' tmp = copy.deepcopy( __salt__['config.option'](scheduled_actions_from_pillar, {}) ) # merge with data from state if scheduled_actions: tmp = dictupdate.update(tmp, scheduled_actions) return tmp def _determine_notification_info(notification_arn, notification_arn_from_pillar, notification_types, notification_types_from_pillar): ''' helper method for present. 
ensure that notification_configs are set ''' pillar_arn_list = copy.deepcopy( __salt__['config.option'](notification_arn_from_pillar, {}) ) pillar_arn = None if pillar_arn_list: pillar_arn = pillar_arn_list[0] pillar_notification_types = copy.deepcopy( __salt__['config.option'](notification_types_from_pillar, {}) ) arn = notification_arn if notification_arn else pillar_arn types = notification_types if notification_types else pillar_notification_types return (arn, types) def _alarms_present(name, min_size_equals_max_size, alarms, alarms_from_pillar, region, key, keyid, profile): ''' helper method for present. ensure that cloudwatch_alarms are set ''' # load data from alarms_from_pillar tmp = copy.deepcopy(__salt__['config.option'](alarms_from_pillar, {})) # merge with data from alarms if alarms: tmp = dictupdate.update(tmp, alarms) # set alarms, using boto_cloudwatch_alarm.present merged_return_value = {'name': name, 'result': True, 'comment': '', 'changes': {}} for _, info in six.iteritems(tmp): # add asg to name and description info['name'] = name + ' ' + info['name'] info['attributes']['description'] = name + ' ' + info['attributes']['description'] # add dimension attribute if 'dimensions' not in info['attributes']: info['attributes']['dimensions'] = {'AutoScalingGroupName': [name]} scaling_policy_actions_only = True # replace ":self:" with our name for action_type in ['alarm_actions', 'insufficient_data_actions', 'ok_actions']: if action_type in info['attributes']: new_actions = [] for action in info['attributes'][action_type]: if 'scaling_policy' not in action: scaling_policy_actions_only = False if ':self:' in action: action = action.replace(':self:', ':{0}:'.format(name)) new_actions.append(action) info['attributes'][action_type] = new_actions # skip alarms that only have actions for scaling policy, if min_size == max_size for this ASG if scaling_policy_actions_only and min_size_equals_max_size: continue # set alarm kwargs = { 'name': info['name'], 
'attributes': info['attributes'], 'region': region, 'key': key, 'keyid': keyid, 'profile': profile, } results = __states__['boto_cloudwatch_alarm.present'](**kwargs) if not results['result']: merged_return_value['result'] = False if results.get('changes', {}) != {}: merged_return_value['changes'][info['name']] = results['changes'] if 'comment' in results: merged_return_value['comment'] += results['comment'] return merged_return_value
saltstack/salt
salt/modules/kubernetesmod.py
_setup_conn_old
python
def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None 
if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {}
Setup kubernetes API connection singleton the old way
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L117-L188
null
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. 
' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
_setup_conn
python
def _setup_conn(**kwargs):
    '''
    Set up the kubernetes API connection singleton.

    Connection settings are taken from ``kwargs`` first and fall back to the
    minion/master configuration options ``kubernetes.kubeconfig``,
    ``kubernetes.kubeconfig-data`` and ``kubernetes.context``.

    Returns a dict with the ``kubeconfig`` path and ``context`` that were
    used, so the caller can hand it to ``_cleanup()`` afterwards (which
    removes any temporary kubeconfig file created here).

    Raises CommandExecutionError when neither a usable kubeconfig/context
    pair nor (deprecated) old-style url/credential settings are available.
    '''
    kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')
    kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')
    context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')

    # Materialize base64-encoded kubeconfig content into a temp file when no
    # kubeconfig path is configured, or when kubeconfig_data was explicitly
    # passed in kwargs (an explicit kwarg takes precedence over the
    # configured kubeconfig path). The 'salt-kubeconfig-' prefix is what
    # _cleanup() keys on to know the file is ours to delete.
    if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):
        with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:
            kcfg.write(base64.b64decode(kubeconfig_data))
            kubeconfig = kcfg.name

    if not (kubeconfig and context):
        # Fall back to the deprecated url/certificate/username/password
        # configuration if it is present; otherwise fail loudly.
        if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):
            # Fixed typo: release name is 'Sodium' (matches the warn_until
            # argument), not 'Sodiom'.
            salt.utils.versions.warn_until(
                'Sodium',
                'Kubernetes configuration via url, certificate, username and password will be removed in Sodium. '
                'Use \'kubeconfig\' and \'context\' instead.')
            try:
                return _setup_conn_old(**kwargs)
            except Exception:
                raise CommandExecutionError(
                    'Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')
        else:
            raise CommandExecutionError(
                'Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.')

    kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)

    # The return makes unit testing easier
    return {'kubeconfig': kubeconfig, 'context': context}
Setup kubernetes API connection singleton
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L192-L219
null
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): 
salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: 
return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. 
CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of 
kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): 
''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' 
) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
ping
python
def ping(**kwargs):
    '''
    Checks connections with the kubernetes API server.
    Returns True if the connection can be established, False otherwise.

    CLI Example:

        salt '*' kubernetes.ping
    '''
    # EAFP probe: listing the cluster nodes exercises the full connection
    # setup; any failure surfaces as CommandExecutionError.
    try:
        nodes(**kwargs)
    except CommandExecutionError:
        return False
    return True
Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. CLI Example: salt '*' kubernetes.ping
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L251-L265
[ "def nodes(**kwargs):\n '''\n Return the names of the nodes composing the kubernetes cluster\n\n CLI Examples::\n\n salt '*' kubernetes.nodes\n salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube\n '''\n cfg = _setup_conn(**kwargs)\n try:\n api_instance = kubernetes.client.CoreV1Api()\n api_response = api_instance.list_node()\n\n return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')]\n except (ApiException, HTTPError) as exc:\n if isinstance(exc, ApiException) and exc.status == 404:\n return None\n else:\n log.exception('Exception when calling CoreV1Api->list_node')\n raise CommandExecutionError(exc)\n finally:\n _cleanup(**cfg)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' 
kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
nodes
python
def nodes(**kwargs):
    '''
    Return the names of the nodes composing the kubernetes cluster

    CLI Examples::

        salt '*' kubernetes.nodes
        salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
    '''
    # Open the API connection first; _cleanup() must always run afterwards
    # so any temporary kubeconfig written by _setup_conn() is removed.
    cfg = _setup_conn(**kwargs)
    try:
        node_list = kubernetes.client.CoreV1Api().list_node()
        # Collect one name per node entry from the serialized response.
        names = []
        for entry in node_list.to_dict().get('items'):
            names.append(entry['metadata']['name'])
        return names
    except (ApiException, HTTPError) as exc:
        # A 404 from the API server means there is nothing to report.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception('Exception when calling CoreV1Api->list_node')
        raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L268-L290
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. 
CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of 
kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): 
''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' 
) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
node
python
def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None
Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L293-L318
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
node_add_label
python
def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None
Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L337-L368
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. 
CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of 
kubernetes services defined in the namespace

    CLI Examples::

        salt '*' kubernetes.services
        salt '*' kubernetes.services namespace=default
    '''
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.list_namespaced_service(namespace)

        # Only the service names are returned, not the full API objects.
        return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')]
    except (ApiException, HTTPError) as exc:
        # 404 is treated as "nothing there" rather than an error.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->list_namespaced_service'
            )
            raise CommandExecutionError(exc)
    finally:
        # Always remove any temporary kubeconfig created by _setup_conn().
        _cleanup(**cfg)


def pods(namespace='default', **kwargs):
    '''
    Return a list of kubernetes pods defined in the namespace

    CLI Examples::

        salt '*' kubernetes.pods
        salt '*' kubernetes.pods namespace=default
    '''
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.list_namespaced_pod(namespace)

        # Only the pod names are returned, not the full API objects.
        return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')]
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->list_namespaced_pod'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)


def secrets(namespace='default', **kwargs):
    '''
    Return a list of kubernetes secrets defined in the namespace

    CLI Examples::

        salt '*' kubernetes.secrets
        salt '*' kubernetes.secrets namespace=default
    '''
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.list_namespaced_secret(namespace)

        # Only the secret names are returned; use show_secret() for contents.
        return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')]
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->list_namespaced_secret'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)


def configmaps(namespace='default', **kwargs):
''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' 
) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
namespaces
python
def namespaces(**kwargs):
    '''
    Return the names of the available namespaces

    CLI Examples::

        salt '*' kubernetes.namespaces
        salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
    '''
    # Build (and later tear down) the API connection from kwargs/minion config.
    cfg = _setup_conn(**kwargs)
    try:
        core_api = kubernetes.client.CoreV1Api()
        listing = core_api.list_namespace()
        # Collect just the namespace names from the raw API listing.
        names = []
        for item in listing.to_dict().get('items'):
            names.append(item['metadata']['name'])
        return names
    except (ApiException, HTTPError) as exc:
        # A 404 means "nothing there" rather than a hard failure.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception('Exception when calling CoreV1Api->list_namespace')
        raise CommandExecutionError(exc)
    finally:
        # Always remove any temporary kubeconfig created by _setup_conn.
        _cleanup(**cfg)
Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L404-L426
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def deployments(namespace='default', **kwargs): ''' Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise 
CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 
404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
deployments
python
def deployments(namespace='default', **kwargs): ''' Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return a list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L429-L454
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a 
list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', 
**kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
services
python
def services(namespace='default', **kwargs):
    '''
    Return a list of kubernetes services defined in the namespace

    CLI Examples::

        salt '*' kubernetes.services
        salt '*' kubernetes.services namespace=default
    '''
    # Establish the API connection; _setup_conn handles kubeconfig/context
    # resolution and returns the cfg dict needed for cleanup afterwards.
    cfg = _setup_conn(**kwargs)
    try:
        core_api = kubernetes.client.CoreV1Api()
        resp = core_api.list_namespaced_service(namespace)

        # Callers only need the service names, not the full objects.
        found = resp.to_dict().get('items')
        return [entry['metadata']['name'] for entry in found]
    except (ApiException, HTTPError) as exc:
        # A 404 from the API server means "nothing there" rather than a
        # failure, matching the behaviour of the sibling listing functions.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception(
            'Exception when calling '
            'CoreV1Api->list_namespaced_service'
        )
        raise CommandExecutionError(exc)
    finally:
        # Always remove any temporary kubeconfig created by _setup_conn.
        _cleanup(**cfg)
Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L457-L482
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when 
calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
pods
python
def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L485-L510
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) 
finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: 
log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
configmaps
python
def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L541-L566
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
show_deployment
python
def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L569-L594
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' 
) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
show_namespace
python
def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L653-L677
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. 
CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, 
namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c 
namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: 
_cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
show_secret
python
def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L680-L713
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
delete_deployment
python
def delete_deployment(name, namespace='default', **kwargs):
    '''
    Deletes the kubernetes deployment defined by name and namespace

    CLI Examples::

        salt '*' kubernetes.delete_deployment my-nginx
        salt '*' kubernetes.delete_deployment name=my-nginx namespace=default

    :param name: name of the deployment to delete.
    :param namespace: namespace the deployment lives in (default ``default``).
    :param kwargs: connection overrides (``kubeconfig``, ``context``, ...)
        forwarded to ``_setup_conn``.
    :return: the API response as a dict (``code`` is forced to 200 once the
        deployment is confirmed gone), or ``None`` when the deployment does
        not exist (HTTP 404).
    :raises CommandExecutionError: on any other API/HTTP failure.
    '''
    cfg = _setup_conn(**kwargs)
    # orphan_dependents=True mirrors the historical behaviour: child objects
    # (replica sets, pods) are not cascade-deleted by the API server.
    body = kubernetes.client.V1DeleteOptions(orphan_dependents=True)

    try:
        api_instance = kubernetes.client.ExtensionsV1beta1Api()
        api_response = api_instance.delete_namespaced_deployment(
            name=name,
            namespace=namespace,
            body=body)
        mutable_api_response = api_response.to_dict()
        if not salt.utils.platform.is_windows():
            try:
                # Poll until the deployment disappears, bounded by a
                # SIGALRM-based time limit (POLLING_TIME_LIMIT seconds).
                with _time_limit(POLLING_TIME_LIMIT):
                    while show_deployment(name, namespace) is not None:
                        sleep(1)
                    else:  # pylint: disable=useless-else-on-loop
                        # Loop exited normally -> deployment is gone.
                        mutable_api_response['code'] = 200
            except TimeoutError:
                # Time limit hit; leave the original response code so the
                # warning below fires.
                pass
        else:
            # Windows has no signal.alarm implementation, so we are just falling
            # back to loop-counting (~60 seconds of 1-second polls).
            for i in range(60):
                if show_deployment(name, namespace) is None:
                    mutable_api_response['code'] = 200
                    break
                else:
                    sleep(1)

        if mutable_api_response['code'] != 200:
            log.warning('Reached polling time limit. Deployment is not yet '
                        'deleted, but we are backing off. Sorry, but you\'ll '
                        'have to check manually.')
        return mutable_api_response
    except (ApiException, HTTPError) as exc:
        # 404 means the deployment was already absent: not an error.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'ExtensionsV1beta1Api->delete_namespaced_deployment'
            )
            raise CommandExecutionError(exc)
    finally:
        # Always remove any temporary kubeconfig created by _setup_conn.
        _cleanup(**cfg)
Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L746-L798
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. 
Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def show_deployment(name, namespace='default', **kwargs):\n '''\n Return the kubernetes deployment defined by name and namespace\n\n CLI Examples::\n\n salt '*' kubernetes.show_deployment my-nginx default\n salt '*' kubernetes.show_deployment name=my-nginx namespace=default\n '''\n cfg = _setup_conn(**kwargs)\n try:\n api_instance = kubernetes.client.ExtensionsV1beta1Api()\n api_response = api_instance.read_namespaced_deployment(name, namespace)\n\n return api_response.to_dict()\n except (ApiException, HTTPError) as exc:\n if isinstance(exc, ApiException) and exc.status == 404:\n return None\n else:\n log.exception(\n 'Exception when calling '\n 'ExtensionsV1beta1Api->read_namespaced_deployment'\n )\n raise CommandExecutionError(exc)\n finally:\n _cleanup(**cfg)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = 
api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
delete_service
python
def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L801-L828
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret 
confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
delete_pod
python
def delete_pod(name, namespace='default', **kwargs):
    '''
    Remove the pod identified by ``name`` from the given ``namespace``.

    Returns the API response as a dict on success, or ``None`` when the
    pod does not exist (HTTP 404).

    CLI Examples::

        salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default
        salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default
    '''
    conn_args = _setup_conn(**kwargs)
    delete_opts = kubernetes.client.V1DeleteOptions(orphan_dependents=True)

    try:
        core_api = kubernetes.client.CoreV1Api()
        response = core_api.delete_namespaced_pod(
            name=name, namespace=namespace, body=delete_opts)
        return response.to_dict()
    except (ApiException, HTTPError) as exc:
        # A missing pod is reported as "not found" rather than raised
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception(
            'Exception when calling '
            'CoreV1Api->delete_namespaced_pod'
        )
        raise CommandExecutionError(exc)
    finally:
        # Always remove any temporary kubeconfig created by _setup_conn
        _cleanup(**conn_args)
Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L831-L861
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default 
''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
create_pod
python
def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Creates the kubernetes deployment as defined by the user.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1003-L1045
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. 
Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def __create_object_body(kind,\n obj_class,\n spec_creator,\n name,\n namespace,\n metadata,\n spec,\n source,\n template,\n saltenv):\n '''\n Create a Kubernetes Object body instance.\n '''\n if source:\n src_obj = __read_and_render_yaml_file(source, template, saltenv)\n if (\n not isinstance(src_obj, dict) or\n 'kind' not in src_obj or\n src_obj['kind'] != kind):\n raise CommandExecutionError(\n 'The source file should define only '\n 'a {0} object'.format(kind))\n\n if 'metadata' in src_obj:\n metadata = src_obj['metadata']\n if 'spec' in src_obj:\n spec = src_obj['spec']\n\n return obj_class(\n metadata=__dict_to_object_meta(name, namespace, metadata),\n spec=spec_creator(spec))\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
create_secret
python
def create_secret(
        name,
        namespace='default',
        data=None,
        source=None,
        template=None,
        saltenv='base',
        **kwargs):
    '''
    Creates the kubernetes secret as defined by the user.

    name
        The name of the secret to create.

    namespace
        The namespace to create the secret in. Defaults to ``default``.

    data
        A dict of plain-text secret values; they are base64 encoded here
        before being sent to the API server, as kubernetes requires.

    source
        A file on the salt fileserver to read the secret data from
        (takes precedence over ``data``).

    template
        Templating engine used to render ``source`` (e.g. ``jinja``).

    saltenv
        The salt fileserver environment ``source`` is fetched from.

    CLI Examples::

        salt 'minion1' kubernetes.create_secret \
            passwords default '{"db": "letmein"}'

        salt 'minion2' kubernetes.create_secret \
            name=passwords namespace=default data='{"db": "letmein"}'
    '''
    if source:
        data = __read_and_render_yaml_file(source, template, saltenv)
    elif data is None:
        data = {}

    data = __enforce_only_strings_dict(data)

    # encode the secrets using base64 as required by kubernetes
    for key in data:
        # base64.b64encode() only accepts bytes on Python 3; the values are
        # guaranteed to be text after __enforce_only_strings_dict() above,
        # so encode them first (a no-op difference on Python 2).
        data[key] = base64.b64encode(data[key].encode('utf-8'))

    body = kubernetes.client.V1Secret(
        metadata=__dict_to_object_meta(name, namespace, {}),
        data=data)

    cfg = _setup_conn(**kwargs)

    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.create_namespaced_secret(
            namespace, body)

        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->create_namespaced_secret'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1093-L1145
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def __read_and_render_yaml_file(source,\n template,\n saltenv):\n '''\n Read a yaml file and, if needed, renders that using the specifieds\n templating. 
Returns the python objects defined inside of the file.\n '''\n sfn = __salt__['cp.cache_file'](source, saltenv)\n if not sfn:\n raise CommandExecutionError(\n 'Source file \\'{0}\\' not found'.format(source))\n\n with salt.utils.files.fopen(sfn, 'r') as src:\n contents = src.read()\n\n if template:\n if template in salt.utils.templates.TEMPLATE_REGISTRY:\n # TODO: should we allow user to set also `context` like # pylint: disable=fixme\n # `file.managed` does?\n # Apply templating\n data = salt.utils.templates.TEMPLATE_REGISTRY[template](\n contents,\n from_str=True,\n to_str=True,\n saltenv=saltenv,\n grains=__grains__,\n pillar=__pillar__,\n salt=__salt__,\n opts=__opts__)\n\n if not data['result']:\n # Failed to render the template\n raise CommandExecutionError(\n 'Failed to render file path with error: '\n '{0}'.format(data['data'])\n )\n\n contents = data['data'].encode('utf-8')\n else:\n raise CommandExecutionError(\n 'Unknown template specified: {0}'.format(\n template))\n\n return salt.utils.yaml.safe_load(contents)\n", "def __enforce_only_strings_dict(dictionary):\n '''\n Returns a dictionary that has string keys and values.\n '''\n ret = {}\n\n for key, value in iteritems(dictionary):\n ret[six.text_type(key)] = six.text_type(value)\n\n return ret\n", "def __dict_to_object_meta(name, namespace, metadata):\n '''\n Converts a dictionary into kubernetes ObjectMetaV1 instance.\n '''\n meta_obj = kubernetes.client.V1ObjectMeta()\n meta_obj.namespace = namespace\n\n # Replicate `kubectl [create|replace|apply] --record`\n if 'annotations' not in metadata:\n metadata['annotations'] = {}\n if 'kubernetes.io/change-cause' not in metadata['annotations']:\n metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv)\n\n for key, value in iteritems(metadata):\n if hasattr(meta_obj, key):\n setattr(meta_obj, key, value)\n\n if meta_obj.name != name:\n log.warning(\n 'The object already has a name attribute, overwriting it with '\n 'the one defined inside 
of salt')\n meta_obj.name = name\n\n return meta_obj\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. 
CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by name and namespace, having the specificed data. 
CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
create_configmap
python
def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Creates the kubernetes configmap as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1148-L1196
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def __read_and_render_yaml_file(source,\n template,\n saltenv):\n '''\n Read a yaml file and, if needed, renders that using the specifieds\n templating. 
Returns the python objects defined inside of the file.\n '''\n sfn = __salt__['cp.cache_file'](source, saltenv)\n if not sfn:\n raise CommandExecutionError(\n 'Source file \\'{0}\\' not found'.format(source))\n\n with salt.utils.files.fopen(sfn, 'r') as src:\n contents = src.read()\n\n if template:\n if template in salt.utils.templates.TEMPLATE_REGISTRY:\n # TODO: should we allow user to set also `context` like # pylint: disable=fixme\n # `file.managed` does?\n # Apply templating\n data = salt.utils.templates.TEMPLATE_REGISTRY[template](\n contents,\n from_str=True,\n to_str=True,\n saltenv=saltenv,\n grains=__grains__,\n pillar=__pillar__,\n salt=__salt__,\n opts=__opts__)\n\n if not data['result']:\n # Failed to render the template\n raise CommandExecutionError(\n 'Failed to render file path with error: '\n '{0}'.format(data['data'])\n )\n\n contents = data['data'].encode('utf-8')\n else:\n raise CommandExecutionError(\n 'Unknown template specified: {0}'.format(\n template))\n\n return salt.utils.yaml.safe_load(contents)\n", "def __enforce_only_strings_dict(dictionary):\n '''\n Returns a dictionary that has string keys and values.\n '''\n ret = {}\n\n for key, value in iteritems(dictionary):\n ret[six.text_type(key)] = six.text_type(value)\n\n return ret\n", "def __dict_to_object_meta(name, namespace, metadata):\n '''\n Converts a dictionary into kubernetes ObjectMetaV1 instance.\n '''\n meta_obj = kubernetes.client.V1ObjectMeta()\n meta_obj.namespace = namespace\n\n # Replicate `kubectl [create|replace|apply] --record`\n if 'annotations' not in metadata:\n metadata['annotations'] = {}\n if 'kubernetes.io/change-cause' not in metadata['annotations']:\n metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv)\n\n for key, value in iteritems(metadata):\n if hasattr(meta_obj, key):\n setattr(meta_obj, key, value)\n\n if meta_obj.name != name:\n log.warning(\n 'The object already has a name attribute, overwriting it with '\n 'the one defined inside 
of salt')\n meta_obj.name = name\n\n return meta_obj\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. 
CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by name and namespace, having the specificed data. 
CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
create_namespace
python
def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1199-L1231
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
replace_deployment
python
def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1234-L1276
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. 
Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def __create_object_body(kind,\n obj_class,\n spec_creator,\n name,\n namespace,\n metadata,\n spec,\n source,\n template,\n saltenv):\n '''\n Create a Kubernetes Object body instance.\n '''\n if source:\n src_obj = __read_and_render_yaml_file(source, template, saltenv)\n if (\n not isinstance(src_obj, dict) or\n 'kind' not in src_obj or\n src_obj['kind'] != kind):\n raise CommandExecutionError(\n 'The source file should define only '\n 'a {0} object'.format(kind))\n\n if 'metadata' in src_obj:\n metadata = src_obj['metadata']\n if 'spec' in src_obj:\n spec = src_obj['spec']\n\n return obj_class(\n metadata=__dict_to_object_meta(name, namespace, metadata),\n spec=spec_creator(spec))\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
def create_namespace(
        name,
        **kwargs):
    '''
    Creates a namespace with the specified name.

    CLI Example:

        salt '*' kubernetes.create_namespace salt
        salt '*' kubernetes.create_namespace name=salt
    '''
    # Build the namespace object with only its name set in the metadata.
    namespace_body = kubernetes.client.V1Namespace(
        metadata=kubernetes.client.V1ObjectMeta(name=name))
    namespace_body.metadata.name = name

    conn_cfg = _setup_conn(**kwargs)
    try:
        api = kubernetes.client.CoreV1Api()
        result = api.create_namespace(namespace_body)
        return result.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception(
            'Exception when calling '
            'CoreV1Api->create_namespace'
        )
        raise CommandExecutionError(exc)
    finally:
        _cleanup(**conn_cfg)
def replace_secret(name,
                   data,
                   source=None,
                   template=None,
                   saltenv='base',
                   namespace='default',
                   **kwargs):
    '''
    Replaces an existing secret with a new one defined by name and
    namespace, having the specified data.

    name
        Name of the secret to replace.
    data
        Dict of secret entries; values are given in clear text and are
        base64-encoded here, as the kubernetes API requires.
    source
        Optional yaml file on the salt fileserver providing ``data``.
    template
        Templating engine used to render ``source``.
    saltenv
        Salt fileserver environment ``source`` is fetched from.
    namespace
        Namespace holding the secret (default ``default``).

    CLI Examples::

        salt 'minion1' kubernetes.replace_secret \
            name=passwords data='{"db": "letmein"}'

        salt 'minion2' kubernetes.replace_secret \
            name=passwords namespace=saltstack data='{"db": "passw0rd"}'
    '''
    if source:
        data = __read_and_render_yaml_file(source, template, saltenv)
    elif data is None:
        data = {}

    data = __enforce_only_strings_dict(data)

    # encode the secrets using base64 as required by kubernetes.
    # Fix: b64encode() only accepts bytes, but the values are text after
    # __enforce_only_strings_dict — on Python 3 the old b64encode(data[key])
    # raised TypeError. Encode to bytes first, decode the result to text.
    for key in data:
        data[key] = base64.b64encode(
            data[key].encode('utf-8')).decode('ascii')

    body = kubernetes.client.V1Secret(
        metadata=__dict_to_object_meta(name, namespace, {}),
        data=data)

    cfg = _setup_conn(**kwargs)

    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.replace_namespaced_secret(
            name, namespace, body)

        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception(
                'Exception when calling '
                'CoreV1Api->replace_namespaced_secret'
            )
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)
def __create_object_body(kind,
                         obj_class,
                         spec_creator,
                         name,
                         namespace,
                         metadata,
                         spec,
                         source,
                         template,
                         saltenv):
    '''
    Create a Kubernetes Object body instance.

    When ``source`` is given it is rendered and must define exactly one
    object of the requested ``kind``; its metadata/spec then take
    precedence over the ``metadata``/``spec`` arguments.
    '''
    if source:
        rendered = __read_and_render_yaml_file(source, template, saltenv)
        # The rendered document must be a mapping whose 'kind' matches.
        matches_kind = (
            isinstance(rendered, dict) and
            rendered.get('kind') == kind)
        if not matches_kind:
            raise CommandExecutionError(
                'The source file should define only '
                'a {0} object'.format(kind))

        # Prefer the metadata/spec from the source file when present.
        metadata = rendered.get('metadata', metadata)
        spec = rendered.get('spec', spec)

    return obj_class(
        metadata=__dict_to_object_meta(name, namespace, metadata),
        spec=spec_creator(spec))
def __dict_to_object_meta(name, namespace, metadata):
    '''
    Converts a dictionary into kubernetes ObjectMetaV1 instance.

    The returned object always carries ``name`` and ``namespace``; every
    other key of ``metadata`` matching a V1ObjectMeta attribute is copied
    over, and a ``kubernetes.io/change-cause`` annotation recording the
    salt command line is added when absent.
    '''
    meta_obj = kubernetes.client.V1ObjectMeta()
    meta_obj.namespace = namespace

    # Fix: work on copies so the caller's metadata dict (and its nested
    # annotations dict) is not mutated as a side effect.
    metadata = dict(metadata)
    annotations = dict(metadata.get('annotations') or {})

    # Replicate `kubectl [create|replace|apply] --record`
    if 'kubernetes.io/change-cause' not in annotations:
        annotations['kubernetes.io/change-cause'] = ' '.join(sys.argv)
    metadata['annotations'] = annotations

    for key, value in iteritems(metadata):
        if hasattr(meta_obj, key):
            setattr(meta_obj, key, value)

    # Fix: only warn when the input actually carried a *different* name.
    # The previous `meta_obj.name != name` check also fired when no name
    # was supplied at all (None != name), logging a misleading warning on
    # nearly every call.
    if meta_obj.name is not None and meta_obj.name != name:
        log.warning(
            'The object already has a name attribute, overwriting it with '
            'the one defined inside of salt')
    meta_obj.name = name

    return meta_obj
def __dict_to_service_spec(spec):
    '''
    Converts a dictionary into kubernetes V1ServiceSpec instance.
    '''
    def _to_service_port(port):
        # A port entry may be a plain port number or a dict of
        # V1ServicePort attributes.
        kube_port = kubernetes.client.V1ServicePort()
        if isinstance(port, dict):
            for port_key, port_value in iteritems(port):
                if hasattr(kube_port, port_key):
                    setattr(kube_port, port_key, port_value)
        else:
            kube_port.port = port
        return kube_port

    spec_obj = kubernetes.client.V1ServiceSpec()
    for key, value in iteritems(spec):
        if key == 'ports':
            spec_obj.ports = [_to_service_port(port) for port in value]
        elif hasattr(spec_obj, key):
            setattr(spec_obj, key, value)

    return spec_obj
saltstack/salt
salt/modules/kubernetesmod.py
replace_service
python
def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg)
Replaces an existing service with a new one defined by name and namespace, having the specified metadata and spec.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1279-L1327
[ "def _cleanup(**kwargs):\n if not kwargs:\n return _cleanup_old(**kwargs)\n\n if 'kubeconfig' in kwargs:\n kubeconfig = kwargs.get('kubeconfig')\n if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'):\n try:\n os.unlink(kubeconfig)\n except (IOError, OSError) as err:\n if err.errno != errno.ENOENT:\n log.exception(err)\n", "def _setup_conn(**kwargs):\n '''\n Setup kubernetes API connection singleton\n '''\n kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig')\n kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data')\n context = kwargs.get('context') or __salt__['config.option']('kubernetes.context')\n\n if (kubeconfig_data and not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')):\n with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg:\n kcfg.write(base64.b64decode(kubeconfig_data))\n kubeconfig = kcfg.name\n\n if not (kubeconfig and context):\n if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'):\n salt.utils.versions.warn_until('Sodium',\n 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. '\n 'Use \\'kubeconfig\\' and \\'context\\' instead.')\n try:\n return _setup_conn_old(**kwargs)\n except Exception:\n raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0')\n else:\n raise CommandExecutionError('Invalid kubernetes configuration. 
Parameter \\'kubeconfig\\' and \\'context\\' are required.')\n kubernetes.config.load_kube_config(config_file=kubeconfig, context=context)\n\n # The return makes unit testing easier\n return {'kubeconfig': kubeconfig, 'context': context}\n", "def __create_object_body(kind,\n obj_class,\n spec_creator,\n name,\n namespace,\n metadata,\n spec,\n source,\n template,\n saltenv):\n '''\n Create a Kubernetes Object body instance.\n '''\n if source:\n src_obj = __read_and_render_yaml_file(source, template, saltenv)\n if (\n not isinstance(src_obj, dict) or\n 'kind' not in src_obj or\n src_obj['kind'] != kind):\n raise CommandExecutionError(\n 'The source file should define only '\n 'a {0} object'.format(kind))\n\n if 'metadata' in src_obj:\n metadata = src_obj['metadata']\n if 'spec' in src_obj:\n spec = src_obj['spec']\n\n return obj_class(\n metadata=__dict_to_object_meta(name, namespace, metadata),\n spec=spec_creator(spec))\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, 
saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__create_object_body
python
def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec))
Create a Kubernetes Object body instance.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1434-L1464
[ "def __dict_to_deployment_spec(spec):\n '''\n Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.\n '''\n spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', ''))\n for key, value in iteritems(spec):\n if hasattr(spec_obj, key):\n setattr(spec_obj, key, value)\n\n return spec_obj\n", "def __dict_to_pod_spec(spec):\n '''\n Converts a dictionary into kubernetes V1PodSpec instance.\n '''\n spec_obj = kubernetes.client.V1PodSpec()\n for key, value in iteritems(spec):\n if hasattr(spec_obj, key):\n setattr(spec_obj, key, value)\n\n return spec_obj\n", "def __dict_to_service_spec(spec):\n '''\n Converts a dictionary into kubernetes V1ServiceSpec instance.\n '''\n spec_obj = kubernetes.client.V1ServiceSpec()\n for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks\n if key == 'ports':\n spec_obj.ports = []\n for port in value:\n kube_port = kubernetes.client.V1ServicePort()\n if isinstance(port, dict):\n for port_key, port_value in iteritems(port):\n if hasattr(kube_port, port_key):\n setattr(kube_port, port_key, port_value)\n else:\n kube_port.port = port\n spec_obj.ports.append(kube_port)\n elif hasattr(spec_obj, key):\n setattr(spec_obj, key, value)\n\n return spec_obj\n", "def __read_and_render_yaml_file(source,\n template,\n saltenv):\n '''\n Read a yaml file and, if needed, renders that using the specifieds\n templating. 
Returns the python objects defined inside of the file.\n '''\n sfn = __salt__['cp.cache_file'](source, saltenv)\n if not sfn:\n raise CommandExecutionError(\n 'Source file \\'{0}\\' not found'.format(source))\n\n with salt.utils.files.fopen(sfn, 'r') as src:\n contents = src.read()\n\n if template:\n if template in salt.utils.templates.TEMPLATE_REGISTRY:\n # TODO: should we allow user to set also `context` like # pylint: disable=fixme\n # `file.managed` does?\n # Apply templating\n data = salt.utils.templates.TEMPLATE_REGISTRY[template](\n contents,\n from_str=True,\n to_str=True,\n saltenv=saltenv,\n grains=__grains__,\n pillar=__pillar__,\n salt=__salt__,\n opts=__opts__)\n\n if not data['result']:\n # Failed to render the template\n raise CommandExecutionError(\n 'Failed to render file path with error: '\n '{0}'.format(data['data'])\n )\n\n contents = data['data'].encode('utf-8')\n else:\n raise CommandExecutionError(\n 'Unknown template specified: {0}'.format(\n template))\n\n return salt.utils.yaml.safe_load(contents)\n", "def __dict_to_object_meta(name, namespace, metadata):\n '''\n Converts a dictionary into kubernetes ObjectMetaV1 instance.\n '''\n meta_obj = kubernetes.client.V1ObjectMeta()\n meta_obj.namespace = namespace\n\n # Replicate `kubectl [create|replace|apply] --record`\n if 'annotations' not in metadata:\n metadata['annotations'] = {}\n if 'kubernetes.io/change-cause' not in metadata['annotations']:\n metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv)\n\n for key, value in iteritems(metadata):\n if hasattr(meta_obj, key):\n setattr(meta_obj, key, value)\n\n if meta_obj.name != name:\n log.warning(\n 'The object already has a name attribute, overwriting it with '\n 'the one defined inside of salt')\n meta_obj.name = name\n\n return meta_obj\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. ''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? 
# Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. ''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. 
''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__read_and_render_yaml_file
python
def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. ''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents)
Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1467-L1510
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def safe_load(stream, Loader=SaltYamlSafeLoader):\n '''\n .. versionadded:: 2018.3.0\n\n Helper function which automagically uses our custom loader.\n '''\n return yaml.load(stream, Loader=Loader)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. 
''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. ''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__dict_to_object_meta
python
def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj
Converts a dictionary into kubernetes ObjectMetaV1 instance.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1513-L1536
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. ''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. 
''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__dict_to_deployment_spec
python
def __dict_to_deployment_spec(spec):
    '''
    Build a kubernetes AppsV1beta1DeploymentSpec from a plain dictionary.

    Only keys that correspond to attributes of the spec object are copied;
    unknown keys are silently ignored. The mandatory ``template`` argument
    defaults to an empty string when the dictionary does not provide one.
    '''
    deployment_spec = AppsV1beta1DeploymentSpec(
        template=spec.get('template', ''))
    for attr_name, attr_value in iteritems(spec):
        # Skip dictionary entries that the spec object does not model.
        if not hasattr(deployment_spec, attr_name):
            continue
        setattr(deployment_spec, attr_name, attr_value)
    return deployment_spec
Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1539-L1548
null
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. 
''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__dict_to_pod_spec
python
def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj
Converts a dictionary into kubernetes V1PodSpec instance.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1551-L1560
null
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__dict_to_service_spec
python
def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj
Converts a dictionary into kubernetes V1ServiceSpec instance.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1563-L1583
null
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
saltstack/salt
salt/modules/kubernetesmod.py
__enforce_only_strings_dict
python
def __enforce_only_strings_dict(dictionary): ''' Returns a dictionary that has string keys and values. ''' ret = {} for key, value in iteritems(dictionary): ret[six.text_type(key)] = six.text_type(value) return ret
Returns a dictionary that has string keys and values.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/kubernetesmod.py#L1586-L1595
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Module for handling kubernetes calls. :optdepends: - kubernetes Python client :configuration: The k8s API settings are provided either in a pillar, in the minion's config file, or in master's config file:: kubernetes.kubeconfig: '/path/to/kubeconfig' kubernetes.kubeconfig-data: '<base64 encoded kubeconfig content' kubernetes.context: 'context' These settings can be overridden by adding `context and `kubeconfig` or `kubeconfig_data` parameters when calling a function. The data format for `kubernetes.kubeconfig-data` value is the content of `kubeconfig` base64 encoded in one line. Only `kubeconfig` or `kubeconfig-data` should be provided. In case both are provided `kubeconfig` entry is preferred. .. code-block:: bash salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube .. versionadded: 2017.7.0 .. versionchanged:: 2019.2.0 .. warning:: Configuration options changed in 2019.2.0. The following configuration options have been removed: - kubernetes.user - kubernetes.password - kubernetes.api_url - kubernetes.certificate-authority-data/file - kubernetes.client-certificate-data/file - kubernetes.client-key-data/file Please use now: - kubernetes.kubeconfig or kubernetes.kubeconfig-data - kubernetes.context ''' # Import Python Futures from __future__ import absolute_import, unicode_literals, print_function import sys import os.path import base64 import errno import logging import tempfile import signal from time import sleep from contextlib import contextmanager from salt.exceptions import CommandExecutionError from salt.ext.six import iteritems from salt.ext import six import salt.utils.files import salt.utils.platform import salt.utils.templates import salt.utils.versions import salt.utils.yaml from salt.exceptions import TimeoutError from salt.ext.six.moves import range # pylint: disable=import-error try: import kubernetes # pylint: disable=import-self import kubernetes.client from kubernetes.client.rest import 
ApiException from urllib3.exceptions import HTTPError try: # There is an API change in Kubernetes >= 2.0.0. from kubernetes.client import V1beta1Deployment as AppsV1beta1Deployment from kubernetes.client import V1beta1DeploymentSpec as AppsV1beta1DeploymentSpec except ImportError: from kubernetes.client import AppsV1beta1Deployment from kubernetes.client import AppsV1beta1DeploymentSpec HAS_LIBS = True except ImportError: HAS_LIBS = False log = logging.getLogger(__name__) __virtualname__ = 'kubernetes' def __virtual__(): ''' Check dependencies ''' if HAS_LIBS: return __virtualname__ return False, 'python kubernetes library not found' if not salt.utils.platform.is_windows(): @contextmanager def _time_limit(seconds): def signal_handler(signum, frame): raise TimeoutError signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) POLLING_TIME_LIMIT = 30 def _setup_conn_old(**kwargs): ''' Setup kubernetes API connection singleton the old way ''' host = __salt__['config.option']('kubernetes.api_url', 'http://localhost:8080') username = __salt__['config.option']('kubernetes.user') password = __salt__['config.option']('kubernetes.password') ca_cert = __salt__['config.option']('kubernetes.certificate-authority-data') client_cert = __salt__['config.option']('kubernetes.client-certificate-data') client_key = __salt__['config.option']('kubernetes.client-key-data') ca_cert_file = __salt__['config.option']('kubernetes.certificate-authority-file') client_cert_file = __salt__['config.option']('kubernetes.client-certificate-file') client_key_file = __salt__['config.option']('kubernetes.client-key-file') # Override default API settings when settings are provided if 'api_url' in kwargs: host = kwargs.get('api_url') if 'api_user' in kwargs: username = kwargs.get('api_user') if 'api_password' in kwargs: password = kwargs.get('api_password') if 'api_certificate_authority_file' in kwargs: ca_cert_file = 
kwargs.get('api_certificate_authority_file') if 'api_client_certificate_file' in kwargs: client_cert_file = kwargs.get('api_client_certificate_file') if 'api_client_key_file' in kwargs: client_key_file = kwargs.get('api_client_key_file') if ( kubernetes.client.configuration.host != host or kubernetes.client.configuration.user != username or kubernetes.client.configuration.password != password): # Recreates API connection if settings are changed kubernetes.client.configuration.__init__() kubernetes.client.configuration.host = host kubernetes.client.configuration.user = username kubernetes.client.configuration.passwd = password if ca_cert_file: kubernetes.client.configuration.ssl_ca_cert = ca_cert_file elif ca_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as ca: ca.write(base64.b64decode(ca_cert)) kubernetes.client.configuration.ssl_ca_cert = ca.name else: kubernetes.client.configuration.ssl_ca_cert = None if client_cert_file: kubernetes.client.configuration.cert_file = client_cert_file elif client_cert: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as c: c.write(base64.b64decode(client_cert)) kubernetes.client.configuration.cert_file = c.name else: kubernetes.client.configuration.cert_file = None if client_key_file: kubernetes.client.configuration.key_file = client_key_file elif client_key: with tempfile.NamedTemporaryFile(prefix='salt-kube-', delete=False) as k: k.write(base64.b64decode(client_key)) kubernetes.client.configuration.key_file = k.name else: kubernetes.client.configuration.key_file = None return {} # pylint: disable=no-member def _setup_conn(**kwargs): ''' Setup kubernetes API connection singleton ''' kubeconfig = kwargs.get('kubeconfig') or __salt__['config.option']('kubernetes.kubeconfig') kubeconfig_data = kwargs.get('kubeconfig_data') or __salt__['config.option']('kubernetes.kubeconfig-data') context = kwargs.get('context') or __salt__['config.option']('kubernetes.context') if (kubeconfig_data and 
not kubeconfig) or (kubeconfig_data and kwargs.get('kubeconfig_data')): with tempfile.NamedTemporaryFile(prefix='salt-kubeconfig-', delete=False) as kcfg: kcfg.write(base64.b64decode(kubeconfig_data)) kubeconfig = kcfg.name if not (kubeconfig and context): if kwargs.get('api_url') or __salt__['config.option']('kubernetes.api_url'): salt.utils.versions.warn_until('Sodium', 'Kubernetes configuration via url, certificate, username and password will be removed in Sodiom. ' 'Use \'kubeconfig\' and \'context\' instead.') try: return _setup_conn_old(**kwargs) except Exception: raise CommandExecutionError('Old style kubernetes configuration is only supported up to python-kubernetes 2.0.0') else: raise CommandExecutionError('Invalid kubernetes configuration. Parameter \'kubeconfig\' and \'context\' are required.') kubernetes.config.load_kube_config(config_file=kubeconfig, context=context) # The return makes unit testing easier return {'kubeconfig': kubeconfig, 'context': context} def _cleanup_old(**kwargs): try: ca = kubernetes.client.configuration.ssl_ca_cert cert = kubernetes.client.configuration.cert_file key = kubernetes.client.configuration.key_file if cert and os.path.exists(cert) and os.path.basename(cert).startswith('salt-kube-'): salt.utils.files.safe_rm(cert) if key and os.path.exists(key) and os.path.basename(key).startswith('salt-kube-'): salt.utils.files.safe_rm(key) if ca and os.path.exists(ca) and os.path.basename(ca).startswith('salt-kube-'): salt.utils.files.safe_rm(ca) except Exception: pass def _cleanup(**kwargs): if not kwargs: return _cleanup_old(**kwargs) if 'kubeconfig' in kwargs: kubeconfig = kwargs.get('kubeconfig') if kubeconfig and os.path.basename(kubeconfig).startswith('salt-kubeconfig-'): try: os.unlink(kubeconfig) except (IOError, OSError) as err: if err.errno != errno.ENOENT: log.exception(err) def ping(**kwargs): ''' Checks connections with the kubernetes API server. Returns True if the connection can be established, False otherwise. 
CLI Example: salt '*' kubernetes.ping ''' status = True try: nodes(**kwargs) except CommandExecutionError: status = False return status def nodes(**kwargs): ''' Return the names of the nodes composing the kubernetes cluster CLI Examples:: salt '*' kubernetes.nodes salt '*' kubernetes.nodes kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() return [k8s_node['metadata']['name'] for k8s_node in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def node(name, **kwargs): ''' Return the details of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node name='minikube' ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_node() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) for k8s_node in api_response.items: if k8s_node.metadata.name == name: return k8s_node.to_dict() return None def node_labels(name, **kwargs): ''' Return the labels of the node identified by the specified name CLI Examples:: salt '*' kubernetes.node_labels name="minikube" ''' match = node(name, **kwargs) if match is not None: return match['metadata']['labels'] return {} def node_add_label(node_name, label_name, label_value, **kwargs): ''' Set the value of the label identified by `label_name` to `label_value` on the node identified by the name `node_name`. Creates the lable if not present. 
CLI Examples:: salt '*' kubernetes.node_add_label node_name="minikube" \ label_name="foo" label_value="bar" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: label_value} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def node_remove_label(node_name, label_name, **kwargs): ''' Removes the label identified by `label_name` from the node identified by the name `node_name`. CLI Examples:: salt '*' kubernetes.node_remove_label node_name="minikube" \ label_name="foo" ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() body = { 'metadata': { 'labels': { label_name: None} } } api_response = api_instance.patch_node(node_name, body) return api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->patch_node') raise CommandExecutionError(exc) finally: _cleanup(**cfg) return None def namespaces(**kwargs): ''' Return the names of the available namespaces CLI Examples:: salt '*' kubernetes.namespaces salt '*' kubernetes.namespaces kubeconfig=/etc/salt/k8s/kubeconfig context=minikube ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespace() return [nms['metadata']['name'] for nms in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception('Exception when calling CoreV1Api->list_namespace') raise CommandExecutionError(exc) finally: _cleanup(**cfg) def deployments(namespace='default', **kwargs): ''' Return a 
list of kubernetes deployments defined in the namespace CLI Examples:: salt '*' kubernetes.deployments salt '*' kubernetes.deployments namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.list_namespaced_deployment(namespace) return [dep['metadata']['name'] for dep in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->list_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def services(namespace='default', **kwargs): ''' Return a list of kubernetes services defined in the namespace CLI Examples:: salt '*' kubernetes.services salt '*' kubernetes.services namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_service(namespace) return [srv['metadata']['name'] for srv in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def pods(namespace='default', **kwargs): ''' Return a list of kubernetes pods defined in the namespace CLI Examples:: salt '*' kubernetes.pods salt '*' kubernetes.pods namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_pod(namespace) return [pod['metadata']['name'] for pod in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def 
secrets(namespace='default', **kwargs): ''' Return a list of kubernetes secrets defined in the namespace CLI Examples:: salt '*' kubernetes.secrets salt '*' kubernetes.secrets namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_secret(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def configmaps(namespace='default', **kwargs): ''' Return a list of kubernetes configmaps defined in the namespace CLI Examples:: salt '*' kubernetes.configmaps salt '*' kubernetes.configmaps namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.list_namespaced_config_map(namespace) return [secret['metadata']['name'] for secret in api_response.to_dict().get('items')] except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->list_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_deployment(name, namespace='default', **kwargs): ''' Return the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.show_deployment my-nginx default salt '*' kubernetes.show_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.read_namespaced_deployment(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'ExtensionsV1beta1Api->read_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_service(name, namespace='default', **kwargs): ''' Return the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.show_service my-nginx default salt '*' kubernetes.show_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_service(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_pod(name, namespace='default', **kwargs): ''' Return POD information for a given pod name defined in the namespace CLI Examples:: salt '*' kubernetes.show_pod guestbook-708336848-fqr2x salt '*' kubernetes.show_pod guestbook-708336848-fqr2x namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_pod(name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_namespace(name, **kwargs): ''' Return information for a given namespace defined by the specified name CLI Examples:: salt '*' kubernetes.show_namespace kube-system ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespace(name) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 
'CoreV1Api->read_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_secret(name, namespace='default', decode=False, **kwargs): ''' Return the kubernetes secret defined by name and namespace. The secrets can be decoded if specified by the user. Warning: this has security implications. CLI Examples:: salt '*' kubernetes.show_secret confidential default salt '*' kubernetes.show_secret name=confidential namespace=default salt '*' kubernetes.show_secret name=confidential decode=True ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_secret(name, namespace) if api_response.data and (decode or decode == 'True'): for key in api_response.data: value = api_response.data[key] api_response.data[key] = base64.b64decode(value) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def show_configmap(name, namespace='default', **kwargs): ''' Return the kubernetes configmap defined by name and namespace. 
CLI Examples:: salt '*' kubernetes.show_configmap game-config default salt '*' kubernetes.show_configmap name=game-config namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.read_namespaced_config_map( name, namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->read_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_deployment(name, namespace='default', **kwargs): ''' Deletes the kubernetes deployment defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_deployment my-nginx salt '*' kubernetes.delete_deployment name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.delete_namespaced_deployment( name=name, namespace=namespace, body=body) mutable_api_response = api_response.to_dict() if not salt.utils.platform.is_windows(): try: with _time_limit(POLLING_TIME_LIMIT): while show_deployment(name, namespace) is not None: sleep(1) else: # pylint: disable=useless-else-on-loop mutable_api_response['code'] = 200 except TimeoutError: pass else: # Windows has not signal.alarm implementation, so we are just falling # back to loop-counting. for i in range(60): if show_deployment(name, namespace) is None: mutable_api_response['code'] = 200 break else: sleep(1) if mutable_api_response['code'] != 200: log.warning('Reached polling time limit. Deployment is not yet ' 'deleted, but we are backing off. 
Sorry, but you\'ll ' 'have to check manually.') return mutable_api_response except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->delete_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_service(name, namespace='default', **kwargs): ''' Deletes the kubernetes service defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_service my-nginx default salt '*' kubernetes.delete_service name=my-nginx namespace=default ''' cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_service( name=name, namespace=namespace) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_pod(name, namespace='default', **kwargs): ''' Deletes the kubernetes pod defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_pod guestbook-708336848-5nl8c default salt '*' kubernetes.delete_pod name=guestbook-708336848-5nl8c namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_pod( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_namespace(name, **kwargs): ''' Deletes the kubernetes namespace defined by name CLI Examples:: salt '*' kubernetes.delete_namespace salt 
salt '*' kubernetes.delete_namespace name=salt ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespace(name=name, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->delete_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_secret(name, namespace='default', **kwargs): ''' Deletes the kubernetes secret defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_secret confidential default salt '*' kubernetes.delete_secret name=confidential namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_secret( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling CoreV1Api->delete_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def delete_configmap(name, namespace='default', **kwargs): ''' Deletes the kubernetes configmap defined by name and namespace CLI Examples:: salt '*' kubernetes.delete_configmap settings default salt '*' kubernetes.delete_configmap name=settings namespace=default ''' cfg = _setup_conn(**kwargs) body = kubernetes.client.V1DeleteOptions(orphan_dependents=True) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.delete_namespaced_config_map( name=name, namespace=namespace, body=body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 
'Exception when calling ' 'CoreV1Api->delete_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_deployment( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.create_namespaced_deployment( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->create_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_pod( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes deployment as defined by the user. ''' body = __create_object_body( kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_pod( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_pod' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_service( name, namespace, metadata, spec, source, template, saltenv, **kwargs): ''' Creates the kubernetes service as defined by the user. 
''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_service( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_secret( name, namespace='default', data=None, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes secret as defined by the user. CLI Examples:: salt 'minion1' kubernetes.create_secret \ passwords default '{"db": "letmein"}' salt 'minion2' kubernetes.create_secret \ name=passwords namespace=default data='{"db": "letmein"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_secret( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_configmap( name, namespace, data, source=None, template=None, saltenv='base', **kwargs): ''' Creates the kubernetes configmap as defined by the user. 
CLI Examples:: salt 'minion1' kubernetes.create_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.create_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespaced_config_map( namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespaced_config_map' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def create_namespace( name, **kwargs): ''' Creates a namespace with the specified name. CLI Example: salt '*' kubernetes.create_namespace salt salt '*' kubernetes.create_namespace name=salt ''' meta_obj = kubernetes.client.V1ObjectMeta(name=name) body = kubernetes.client.V1Namespace(metadata=meta_obj) body.metadata.name = name cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.create_namespace(body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->create_namespace' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_deployment(name, metadata, spec, source, template, saltenv, namespace='default', **kwargs): ''' Replaces an existing deployment with a new one defined by name and namespace, having the specificed metadata and spec. 
''' body = __create_object_body( kind='Deployment', obj_class=AppsV1beta1Deployment, spec_creator=__dict_to_deployment_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.ExtensionsV1beta1Api() api_response = api_instance.replace_namespaced_deployment( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'ExtensionsV1beta1Api->replace_namespaced_deployment' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_service(name, metadata, spec, source, template, old_service, saltenv, namespace='default', **kwargs): ''' Replaces an existing service with a new one defined by name and namespace, having the specificed metadata and spec. ''' body = __create_object_body( kind='Service', obj_class=kubernetes.client.V1Service, spec_creator=__dict_to_service_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv) # Some attributes have to be preserved # otherwise exceptions will be thrown body.spec.cluster_ip = old_service['spec']['cluster_ip'] body.metadata.resource_version = old_service['metadata']['resource_version'] cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_service( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_service' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_secret(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing secret with a new one defined by 
name and namespace, having the specificed data. CLI Examples:: salt 'minion1' kubernetes.replace_secret \ name=passwords data='{"db": "letmein"}' salt 'minion2' kubernetes.replace_secret \ name=passwords namespace=saltstack data='{"db": "passw0rd"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) elif data is None: data = {} data = __enforce_only_strings_dict(data) # encode the secrets using base64 as required by kubernetes for key in data: data[key] = base64.b64encode(data[key]) body = kubernetes.client.V1Secret( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_secret( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_secret' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def replace_configmap(name, data, source=None, template=None, saltenv='base', namespace='default', **kwargs): ''' Replaces an existing configmap with a new one defined by name and namespace with the specified data. 
CLI Examples:: salt 'minion1' kubernetes.replace_configmap \ settings default '{"example.conf": "# example file"}' salt 'minion2' kubernetes.replace_configmap \ name=settings namespace=default data='{"example.conf": "# example file"}' ''' if source: data = __read_and_render_yaml_file(source, template, saltenv) data = __enforce_only_strings_dict(data) body = kubernetes.client.V1ConfigMap( metadata=__dict_to_object_meta(name, namespace, {}), data=data) cfg = _setup_conn(**kwargs) try: api_instance = kubernetes.client.CoreV1Api() api_response = api_instance.replace_namespaced_config_map( name, namespace, body) return api_response.to_dict() except (ApiException, HTTPError) as exc: if isinstance(exc, ApiException) and exc.status == 404: return None else: log.exception( 'Exception when calling ' 'CoreV1Api->replace_namespaced_configmap' ) raise CommandExecutionError(exc) finally: _cleanup(**cfg) def __create_object_body(kind, obj_class, spec_creator, name, namespace, metadata, spec, source, template, saltenv): ''' Create a Kubernetes Object body instance. ''' if source: src_obj = __read_and_render_yaml_file(source, template, saltenv) if ( not isinstance(src_obj, dict) or 'kind' not in src_obj or src_obj['kind'] != kind): raise CommandExecutionError( 'The source file should define only ' 'a {0} object'.format(kind)) if 'metadata' in src_obj: metadata = src_obj['metadata'] if 'spec' in src_obj: spec = src_obj['spec'] return obj_class( metadata=__dict_to_object_meta(name, namespace, metadata), spec=spec_creator(spec)) def __read_and_render_yaml_file(source, template, saltenv): ''' Read a yaml file and, if needed, renders that using the specifieds templating. Returns the python objects defined inside of the file. 
''' sfn = __salt__['cp.cache_file'](source, saltenv) if not sfn: raise CommandExecutionError( 'Source file \'{0}\' not found'.format(source)) with salt.utils.files.fopen(sfn, 'r') as src: contents = src.read() if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: # TODO: should we allow user to set also `context` like # pylint: disable=fixme # `file.managed` does? # Apply templating data = salt.utils.templates.TEMPLATE_REGISTRY[template]( contents, from_str=True, to_str=True, saltenv=saltenv, grains=__grains__, pillar=__pillar__, salt=__salt__, opts=__opts__) if not data['result']: # Failed to render the template raise CommandExecutionError( 'Failed to render file path with error: ' '{0}'.format(data['data']) ) contents = data['data'].encode('utf-8') else: raise CommandExecutionError( 'Unknown template specified: {0}'.format( template)) return salt.utils.yaml.safe_load(contents) def __dict_to_object_meta(name, namespace, metadata): ''' Converts a dictionary into kubernetes ObjectMetaV1 instance. ''' meta_obj = kubernetes.client.V1ObjectMeta() meta_obj.namespace = namespace # Replicate `kubectl [create|replace|apply] --record` if 'annotations' not in metadata: metadata['annotations'] = {} if 'kubernetes.io/change-cause' not in metadata['annotations']: metadata['annotations']['kubernetes.io/change-cause'] = ' '.join(sys.argv) for key, value in iteritems(metadata): if hasattr(meta_obj, key): setattr(meta_obj, key, value) if meta_obj.name != name: log.warning( 'The object already has a name attribute, overwriting it with ' 'the one defined inside of salt') meta_obj.name = name return meta_obj def __dict_to_deployment_spec(spec): ''' Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance. 
''' spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', '')) for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_pod_spec(spec): ''' Converts a dictionary into kubernetes V1PodSpec instance. ''' spec_obj = kubernetes.client.V1PodSpec() for key, value in iteritems(spec): if hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj def __dict_to_service_spec(spec): ''' Converts a dictionary into kubernetes V1ServiceSpec instance. ''' spec_obj = kubernetes.client.V1ServiceSpec() for key, value in iteritems(spec): # pylint: disable=too-many-nested-blocks if key == 'ports': spec_obj.ports = [] for port in value: kube_port = kubernetes.client.V1ServicePort() if isinstance(port, dict): for port_key, port_value in iteritems(port): if hasattr(kube_port, port_key): setattr(kube_port, port_key, port_value) else: kube_port.port = port spec_obj.ports.append(kube_port) elif hasattr(spec_obj, key): setattr(spec_obj, key, value) return spec_obj
saltstack/salt
salt/utils/virt.py
VirtKey.accept
python
def accept(self, pub): ''' Accept the provided key ''' try: with salt.utils.files.fopen(self.path, 'r') as fp_: expiry = int(fp_.read()) except (OSError, IOError): log.error( 'Request to sign key for minion \'%s\' on hyper \'%s\' ' 'denied: no authorization', self.id, self.hyper ) return False except ValueError: log.error('Invalid expiry data in %s', self.path) return False # Limit acceptance window to 10 minutes # TODO: Move this value to the master config file if (time.time() - expiry) > 600: log.warning( 'Request to sign key for minion "%s" on hyper "%s" denied: ' 'authorization expired', self.id, self.hyper ) return False pubfn = os.path.join(self.opts['pki_dir'], 'minions', self.id) with salt.utils.files.fopen(pubfn, 'w+') as fp_: fp_.write(pub) self.void() return True
Accept the provided key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virt.py#L32-L64
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def void(self):\n '''\n Invalidate any existing authorization\n '''\n try:\n os.unlink(self.path)\n return True\n except OSError:\n return False\n" ]
class VirtKey(object): ''' Used to manage key signing requests. ''' def __init__(self, hyper, id_, opts): self.opts = opts self.hyper = hyper self.id = id_ path = os.path.join(self.opts['pki_dir'], 'virtkeys', hyper) if not os.path.isdir(path): os.makedirs(path) self.path = os.path.join(path, id_) def authorize(self): ''' Prepare the master to expect a signing request ''' with salt.utils.files.fopen(self.path, 'w+') as fp_: fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function return True def void(self): ''' Invalidate any existing authorization ''' try: os.unlink(self.path) return True except OSError: return False
saltstack/salt
salt/utils/virt.py
VirtKey.authorize
python
def authorize(self): ''' Prepare the master to expect a signing request ''' with salt.utils.files.fopen(self.path, 'w+') as fp_: fp_.write(str(int(time.time()))) # future lint: disable=blacklisted-function return True
Prepare the master to expect a signing request
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/virt.py#L66-L72
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n" ]
class VirtKey(object): ''' Used to manage key signing requests. ''' def __init__(self, hyper, id_, opts): self.opts = opts self.hyper = hyper self.id = id_ path = os.path.join(self.opts['pki_dir'], 'virtkeys', hyper) if not os.path.isdir(path): os.makedirs(path) self.path = os.path.join(path, id_) def accept(self, pub): ''' Accept the provided key ''' try: with salt.utils.files.fopen(self.path, 'r') as fp_: expiry = int(fp_.read()) except (OSError, IOError): log.error( 'Request to sign key for minion \'%s\' on hyper \'%s\' ' 'denied: no authorization', self.id, self.hyper ) return False except ValueError: log.error('Invalid expiry data in %s', self.path) return False # Limit acceptance window to 10 minutes # TODO: Move this value to the master config file if (time.time() - expiry) > 600: log.warning( 'Request to sign key for minion "%s" on hyper "%s" denied: ' 'authorization expired', self.id, self.hyper ) return False pubfn = os.path.join(self.opts['pki_dir'], 'minions', self.id) with salt.utils.files.fopen(pubfn, 'w+') as fp_: fp_.write(pub) self.void() return True def void(self): ''' Invalidate any existing authorization ''' try: os.unlink(self.path) return True except OSError: return False
saltstack/salt
salt/ext/backports_abc.py
patch
python
def patch(patch_inspect=True): PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable if patch_inspect: import inspect PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable
Main entry point for patching the ``collections.abc`` and ``inspect`` standard library modules.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/ext/backports_abc.py#L205-L216
null
""" Patch recently added ABCs into the standard lib module ``collections.abc`` (Py3) or ``collections`` (Py2). Usage:: import backports_abc backports_abc.patch() or:: try: from collections.abc import Generator except ImportError: from backports_abc import Generator """ try: import collections.abc as _collections_abc except ImportError: import collections as _collections_abc def get_mro(cls): try: return cls.__mro__ except AttributeError: return old_style_mro(cls) def old_style_mro(cls): yield cls for base in cls.__bases__: for c in old_style_mro(base): yield c def mk_gen(): from abc import abstractmethod required_methods = ( '__iter__', '__next__' if hasattr(iter(()), '__next__') else 'next', 'send', 'throw', 'close') class Generator(_collections_abc.Iterator): __slots__ = () if '__next__' in required_methods: def __next__(self): return self.send(None) else: def next(self): return self.send(None) @abstractmethod def send(self, value): raise StopIteration @abstractmethod def throw(self, typ, val=None, tb=None): if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val def close(self): try: self.throw(GeneratorExit) except (GeneratorExit, StopIteration): pass else: raise RuntimeError('generator ignored GeneratorExit') @classmethod def __subclasshook__(cls, C): if cls is Generator: mro = get_mro(C) for method in required_methods: for base in mro: if method in base.__dict__: break else: return NotImplemented return True return NotImplemented generator = type((lambda: (yield))()) Generator.register(generator) return Generator def mk_awaitable(): from abc import abstractmethod, ABCMeta @abstractmethod def __await__(self): yield @classmethod def __subclasshook__(cls, C): if cls is Awaitable: for B in get_mro(C): if '__await__' in B.__dict__: if B.__dict__['__await__']: return True break return NotImplemented # calling metaclass directly as syntax differs in Py2/Py3 Awaitable = ABCMeta('Awaitable', (), { '__slots__': (), 
'__await__': __await__, '__subclasshook__': __subclasshook__, }) return Awaitable def mk_coroutine(): from abc import abstractmethod class Coroutine(Awaitable): __slots__ = () @abstractmethod def send(self, value): """Send a value into the coroutine. Return next yielded value or raise StopIteration. """ raise StopIteration @abstractmethod def throw(self, typ, val=None, tb=None): """Raise an exception in the coroutine. Return next yielded value or raise StopIteration. """ if val is None: if tb is None: raise typ val = typ() if tb is not None: val = val.with_traceback(tb) raise val def close(self): """Raise GeneratorExit inside coroutine. """ try: self.throw(GeneratorExit) except (GeneratorExit, StopIteration): pass else: raise RuntimeError('coroutine ignored GeneratorExit') @classmethod def __subclasshook__(cls, C): if cls is Coroutine: mro = get_mro(C) for method in ('__await__', 'send', 'throw', 'close'): for base in mro: if method in base.__dict__: break else: return NotImplemented return True return NotImplemented return Coroutine ### # make all ABCs available in this module try: Generator = _collections_abc.Generator except AttributeError: Generator = mk_gen() try: Awaitable = _collections_abc.Awaitable except AttributeError: Awaitable = mk_awaitable() try: Coroutine = _collections_abc.Coroutine except AttributeError: Coroutine = mk_coroutine() try: from inspect import isawaitable except ImportError: def isawaitable(obj): return isinstance(obj, Awaitable) ### # allow patching the stdlib PATCHED = {}
saltstack/salt
salt/utils/kickstart.py
parse_auth
python
def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', 
action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args
Parses the auth/authconfig line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L25-L83
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', 
dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) 
parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the 
logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', 
dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') 
parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return 
args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) 
rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): 
newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = 
parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): 
ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] 
= {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if 
mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, 
default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_iscsiname
python
def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args
Parse the iscsiname line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L345-L356
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', 
action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', 
choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', 
dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', 
dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse 
the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') 
parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) 
parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 
'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = 
parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) 
elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') 
parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in 
ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_partition
python
def parse_partition(rule):
    '''
    Parse the partition line

    Takes a kickstart ``part``/``partition`` directive string and returns a
    dict containing only the options that were actually set on the line
    (unset/falsy options are dropped).
    '''
    parser = argparse.ArgumentParser()
    rules = shlex.split(rule)
    rules.pop(0)  # discard the leading 'part'/'partition' keyword itself
    parser.add_argument('mntpoint')
    parser.add_argument('--size', dest='size', action='store')
    parser.add_argument('--grow', dest='grow', action='store_true')
    parser.add_argument('--maxsize', dest='maxsize', action='store')
    parser.add_argument('--noformat', dest='noformat', action='store_true')
    parser.add_argument('--onpart', '--usepart', dest='onpart', action='store')
    parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store')
    parser.add_argument('--asprimary', dest='asprimary', action='store_true')
    parser.add_argument('--fsprofile', dest='fsprofile', action='store')
    parser.add_argument('--fstype', dest='fstype', action='store')
    parser.add_argument('--fsoptions', dest='fsoptions', action='store')
    parser.add_argument('--label', dest='label', action='store')
    parser.add_argument('--recommended', dest='recommended', action='store_true')
    parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store')
    parser.add_argument('--encrypted', dest='encrypted', action='store_true')
    parser.add_argument('--passphrase', dest='passphrase', action='store')
    parser.add_argument('--escrowcert', dest='escrowcert', action='store')
    # The original only accepted '--backupphrase', but the sibling parsers
    # (autopart, logvol, raid, volgroup) and the kickstart 'part' command all
    # spell it '--backuppassphrase'.  Accept both spellings while keeping the
    # original dest key for backward compatibility with existing callers.
    parser.add_argument('--backupphrase', '--backuppassphrase',
                        dest='backupphrase', action='store')
    # Drop unset options inline.  clean_args() deletes keys from the dict
    # while iterating it, which raises RuntimeError ("dictionary changed size
    # during iteration") on both Python 2 and 3 as soon as any option is
    # unset, so an equivalent dict comprehension is used here instead.
    args = {key: val
            for key, val in vars(parser.parse_args(rules)).items()
            if val}
    parser = None
    return args
Parse the partition line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L528-L557
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', 
action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None 
return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', 
action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' 
Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) 
elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = 
parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', 
action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): 
if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_raid
python
def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args
Parse the raid line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L560-L601
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', 
dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', 
action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() 
rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: 
args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = 
parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] 
= parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', 
dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': 
interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_services
python
def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args
Parse the services line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L689-L701
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', 
dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) 
rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') 
parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', 
dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in 
fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif 
line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): 
ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') 
parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = 
ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_updates
python
def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True
Parse the updates line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L739-L745
null
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', 
dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) 
rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = 
argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): 
''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser 
= None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] 
ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): 
ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', 
dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in 
ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
parse_volgroup
python
def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args
Parse the volgroup line
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L820-L855
[ "def clean_args(args):\n '''\n Cleans up the args that weren't passed in\n '''\n for arg in args:\n if not args[arg]:\n del args[arg]\n return args\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', 
dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) 
rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = 
argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') 
parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): 
ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): 
ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) 
parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language 
sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, 
default_flow_style=False)
saltstack/salt
salt/utils/kickstart.py
mksls
python
def mksls(src, dst=None): ''' Convert a kickstart file to an SLS file ''' mode = 'command' sls = {} ks_opts = {} with salt.utils.files.fopen(src, 'r') as fh_: for line in fh_: if line.startswith('#'): continue if mode == 'command': if line.startswith('auth ') or line.startswith('authconfig '): ks_opts['auth'] = parse_auth(line) elif line.startswith('autopart'): ks_opts['autopath'] = parse_autopart(line) elif line.startswith('autostep'): ks_opts['autostep'] = parse_autostep(line) elif line.startswith('bootloader'): ks_opts['bootloader'] = parse_bootloader(line) elif line.startswith('btrfs'): ks_opts['btrfs'] = parse_btrfs(line) elif line.startswith('cdrom'): ks_opts['cdrom'] = True elif line.startswith('clearpart'): ks_opts['clearpart'] = parse_clearpart(line) elif line.startswith('cmdline'): ks_opts['cmdline'] = True elif line.startswith('device'): ks_opts['device'] = parse_device(line) elif line.startswith('dmraid'): ks_opts['dmraid'] = parse_dmraid(line) elif line.startswith('driverdisk'): ks_opts['driverdisk'] = parse_driverdisk(line) elif line.startswith('firewall'): ks_opts['firewall'] = parse_firewall(line) elif line.startswith('firstboot'): ks_opts['firstboot'] = parse_firstboot(line) elif line.startswith('group'): ks_opts['group'] = parse_group(line) elif line.startswith('graphical'): ks_opts['graphical'] = True elif line.startswith('halt'): ks_opts['halt'] = True elif line.startswith('harddrive'): ks_opts['harddrive'] = True elif line.startswith('ignoredisk'): ks_opts['ignoredisk'] = parse_ignoredisk(line) elif line.startswith('install'): ks_opts['install'] = True elif line.startswith('iscsi'): ks_opts['iscsi'] = parse_iscsi(line) elif line.startswith('iscsiname'): ks_opts['iscsiname'] = parse_iscsiname(line) elif line.startswith('keyboard'): ks_opts['keyboard'] = parse_keyboard(line) elif line.startswith('lang'): ks_opts['lang'] = parse_lang(line) elif line.startswith('logvol'): if 'logvol' not in ks_opts.keys(): ks_opts['logvol'] = [] 
ks_opts['logvol'].append(parse_logvol(line)) elif line.startswith('logging'): ks_opts['logging'] = parse_logging(line) elif line.startswith('mediacheck'): ks_opts['mediacheck'] = True elif line.startswith('monitor'): ks_opts['monitor'] = parse_monitor(line) elif line.startswith('multipath'): ks_opts['multipath'] = parse_multipath(line) elif line.startswith('network'): if 'network' not in ks_opts.keys(): ks_opts['network'] = [] ks_opts['network'].append(parse_network(line)) elif line.startswith('nfs'): ks_opts['nfs'] = True elif line.startswith('part ') or line.startswith('partition'): if 'part' not in ks_opts.keys(): ks_opts['part'] = [] ks_opts['part'].append(parse_partition(line)) elif line.startswith('poweroff'): ks_opts['poweroff'] = True elif line.startswith('raid'): if 'raid' not in ks_opts.keys(): ks_opts['raid'] = [] ks_opts['raid'].append(parse_raid(line)) elif line.startswith('reboot'): ks_opts['reboot'] = parse_reboot(line) elif line.startswith('repo'): ks_opts['repo'] = parse_repo(line) elif line.startswith('rescue'): ks_opts['rescue'] = parse_rescue(line) elif line.startswith('rootpw'): ks_opts['rootpw'] = parse_rootpw(line) elif line.startswith('selinux'): ks_opts['selinux'] = parse_selinux(line) elif line.startswith('services'): ks_opts['services'] = parse_services(line) elif line.startswith('shutdown'): ks_opts['shutdown'] = True elif line.startswith('sshpw'): ks_opts['sshpw'] = parse_sshpw(line) elif line.startswith('skipx'): ks_opts['skipx'] = True elif line.startswith('text'): ks_opts['text'] = True elif line.startswith('timezone'): ks_opts['timezone'] = parse_timezone(line) elif line.startswith('updates'): ks_opts['updates'] = parse_updates(line) elif line.startswith('upgrade'): ks_opts['upgrade'] = parse_upgrade(line) elif line.startswith('url'): ks_opts['url'] = True elif line.startswith('user'): ks_opts['user'] = parse_user(line) elif line.startswith('vnc'): ks_opts['vnc'] = parse_vnc(line) elif line.startswith('volgroup'): 
ks_opts['volgroup'] = parse_volgroup(line) elif line.startswith('xconfig'): ks_opts['xconfig'] = parse_xconfig(line) elif line.startswith('zerombr'): ks_opts['zerombr'] = True elif line.startswith('zfcp'): ks_opts['zfcp'] = parse_zfcp(line) if line.startswith('%include'): rules = shlex.split(line) if not ks_opts['include']: ks_opts['include'] = [] ks_opts['include'].append(rules[1]) if line.startswith('%ksappend'): rules = shlex.split(line) if not ks_opts['ksappend']: ks_opts['ksappend'] = [] ks_opts['ksappend'].append(rules[1]) if line.startswith('%packages'): mode = 'packages' if 'packages' not in ks_opts.keys(): ks_opts['packages'] = {'packages': {}} parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--default', dest='default', action='store_true') parser.add_argument('--excludedocs', dest='excludedocs', action='store_true') parser.add_argument('--ignoremissing', dest='ignoremissing', action='store_true') parser.add_argument('--instLangs', dest='instLangs', action='store') parser.add_argument('--multilib', dest='multilib', action='store_true') parser.add_argument('--nodefaults', dest='nodefaults', action='store_true') parser.add_argument('--optional', dest='optional', action='store_true') parser.add_argument('--nobase', dest='nobase', action='store_true') args = clean_args(vars(parser.parse_args(opts))) ks_opts['packages']['options'] = args continue if line.startswith('%pre'): mode = 'pre' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['pre'] = {'options': args, 'script': ''} continue if line.startswith('%post'): mode = 'post' parser = argparse.ArgumentParser() opts = shlex.split(line) opts.pop(0) parser.add_argument('--nochroot', 
dest='nochroot', action='store_true') parser.add_argument('--interpreter', dest='interpreter', action='store') parser.add_argument('--erroronfail', dest='erroronfail', action='store_true') parser.add_argument('--log', dest='log', action='store') args = clean_args(vars(parser.parse_args(opts))) ks_opts['post'] = {'options': args, 'script': ''} continue if line.startswith('%end'): mode = None if mode == 'packages': if line.startswith('-'): package = line.replace('-', '', 1).strip() ks_opts['packages']['packages'][package] = False else: ks_opts['packages']['packages'][line.strip()] = True if mode == 'pre': ks_opts['pre']['script'] += line if mode == 'post': ks_opts['post']['script'] += line # Set language sls[ks_opts['lang']['lang']] = {'locale': ['system']} # Set keyboard sls[ks_opts['keyboard']['xlayouts']] = {'keyboard': ['system']} # Set timezone sls[ks_opts['timezone']['timezone']] = {'timezone': ['system']} if 'utc' in ks_opts['timezone'].keys(): sls[ks_opts['timezone']['timezone']]['timezone'].append('utc') # Set network if 'network' in ks_opts.keys(): for interface in ks_opts['network']: device = interface.get('device', None) if device is not None: del interface['device'] sls[device] = {'proto': interface['bootproto']} del interface['bootproto'] if 'onboot' in interface.keys(): if 'no' in interface['onboot']: sls[device]['enabled'] = False else: sls[device]['enabled'] = True del interface['onboot'] if 'noipv4' in interface.keys(): sls[device]['ipv4'] = {'enabled': False} del interface['noipv4'] if 'noipv6' in interface.keys(): sls[device]['ipv6'] = {'enabled': False} del interface['noipv6'] for option in interface: if type(interface[option]) is bool: sls[device][option] = {'enabled': [interface[option]]} else: sls[device][option] = interface[option] if 'hostname' in interface: sls['system'] = { 'network.system': { 'enabled': True, 'hostname': interface['hostname'], 'apply_hostname': True, } } # Set selinux if 'selinux' in ks_opts.keys(): for mode in 
ks_opts['selinux']: sls[mode] = {'selinux': ['mode']} # Get package data together if 'nobase' not in ks_opts['packages']['options']: sls['base'] = {'pkg_group': ['installed']} packages = ks_opts['packages']['packages'] for package in packages: if not packages[package]: continue if package and packages[package] is True: if package.startswith('@'): pkg_group = package.replace('@', '', 1) sls[pkg_group] = {'pkg_group': ['installed']} else: sls[package] = {'pkg': ['installed']} elif packages[package] is False: sls[package] = {'pkg': ['absent']} if dst: with salt.utils.files.fopen(dst, 'w') as fp_: salt.utils.yaml.safe_dump(sls, fp_, default_flow_style=False) else: return salt.utils.yaml.safe_dump(sls, default_flow_style=False)
Convert a kickstart file to an SLS file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/kickstart.py#L891-L1178
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def safe_dump(data, stream=None, **kwargs):\n '''\n Use a custom dumper to ensure that defaultdict and OrderedDict are\n represented properly. Ensure that unicode strings are encoded unless\n explicitly told not to.\n '''\n if 'allow_unicode' not in kwargs:\n kwargs['allow_unicode'] = True\n return yaml.dump(data, stream, Dumper=SafeOrderedDumper, **kwargs)\n" ]
# -*- coding: utf-8 -*- ''' Utilities for managing kickstart .. versionadded:: Beryllium ''' from __future__ import absolute_import, unicode_literals import shlex import argparse # pylint: disable=minimum-python-version import salt.utils.files import salt.utils.yaml from salt.ext.six.moves import range def clean_args(args): ''' Cleans up the args that weren't passed in ''' for arg in args: if not args[arg]: del args[arg] return args def parse_auth(rule): ''' Parses the auth/authconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) noargs = ('back', 'test', 'nostart', 'kickstart', 'probe', 'enablecache', 'disablecache', 'disablenis', 'enableshadow', 'disableshadow', 'enablemd5', 'disablemd5', 'enableldap', 'enableldapauth', 'enableldaptls', 'disableldap', 'disableldapauth', 'enablekrb5kdcdns', 'disablekrb5kdcdns', 'enablekrb5realmdns', 'disablekrb5realmdns', 'disablekrb5', 'disablehe-siod', 'enablesmbauth', 'disablesmbauth', 'enablewinbind', 'enablewinbindauth', 'disablewinbind', 'disablewinbindauth', 'enablewinbindusedefaultdomain', 'disablewinbindusedefaultdomain', 'enablewins', 'disablewins') for arg in noargs: parser.add_argument('--{0}'.format(arg), dest=arg, action='store_true') parser.add_argument('--enablenis', dest='enablenis', action='store') parser.add_argument('--hesiodrhs', dest='hesiodrhs', action='store') parser.add_argument('--krb5adminserver', dest='krb5adminserver', action='append') parser.add_argument('--krb5kdc', dest='krb5kdc', action='append') parser.add_argument('--ldapbasedn', dest='ldapbasedn', action='store') parser.add_argument('--ldapserver', dest='ldapserver', action='append') parser.add_argument('--nisserver', dest='nisserver', action='append') parser.add_argument('--passalgo', dest='passalgo', action='store') parser.add_argument('--smbidmapgid', dest='smbidmapgid', action='store') parser.add_argument('--smbidmapuid', dest='smbidmapuid', action='store') parser.add_argument('--smbrealm', 
dest='smbrealm', action='store') parser.add_argument('--smbsecurity', dest='smbsecurity', action='store', choices=['user', 'server', 'domain', 'dns']) parser.add_argument('--smbservers', dest='smbservers', action='store') parser.add_argument('--smbworkgroup', dest='smbworkgroup', action='store') parser.add_argument('--winbindjoin', dest='winbindjoin', action='store') parser.add_argument('--winbindseparator', dest='winbindseparator', action='store') parser.add_argument('--winbindtemplatehomedir', dest='winbindtemplatehomedir', action='store') parser.add_argument('--winbindtemplateprimarygroup', dest='winbindtemplateprimarygroup', action='store') parser.add_argument('--winbindtemplateshell', dest='winbindtemplateshell', action='store') parser.add_argument('--enablekrb5', dest='enablekrb5', action='store_true') if '--enablekrb5' in rules: parser.add_argument('--krb5realm', dest='krb5realm', action='store', required=True) parser.add_argument('--enablehesiod', dest='enablehesiod', action='store_true') if '--enablehesiod' in rules: parser.add_argument('--hesiodlhs', dest='hesiodlhs', action='store', required=True) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autopart(rule): ''' Parse the autopart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--type', dest='type', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_autostep(rule): ''' Parse the autostep line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--autoscreenshot', dest='autoscreenshot', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_bootloader(rule): ''' Parse the bootloader line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--append', dest='append', action='store') parser.add_argument('--driveorder', dest='driveorder', action='store') parser.add_argument('--location', dest='location', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--md5pass', dest='md5pass', action='store') parser.add_argument('--upgrade', dest='upgrade', action='store_true') parser.add_argument('--timeout', dest='timeout', action='store') parser.add_argument('--boot-drive', dest='bootdrive', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_btrfs(rule): ''' Parse the btrfs line TODO: finish up the weird parsing on this one http://fedoraproject.org/wiki/Anaconda/Kickstart#btrfs ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--data', dest='data', action='store') parser.add_argument('--metadata', dest='metadata', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--subvol', dest='subvol', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_clearpart(rule): ''' Parse the clearpart line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--all', dest='all', action='store_true') parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--init_label', dest='init_label', action='store_true') parser.add_argument('--linux', dest='linux', action='store_true') 
parser.add_argument('--none', dest='none', action='store_true') parser.add_argument('--initlabel', dest='init_label', action='store_true') parser.add_argument('--list', dest='list', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_device(rule): ''' Parse the device line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) modulename = rules.pop(0) parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) args['modulename'] = modulename parser = None return args def parse_dmraid(rule): ''' Parse the dmraid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--dev', dest='dev', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_driverdisk(rule): ''' Parse the driverdisk line ''' if '--' not in rule: return {'partition': rule} parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--source', dest='source', action='store') parser.add_argument('--biospart', dest='biospart', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firewall(rule): ''' Parse the firewall line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--service', dest='service', action='store') parser.add_argument('--ssh', dest='ssh', action='store_true') parser.add_argument('--smtp', dest='smtp', action='store_true') parser.add_argument('--http', dest='http', action='store_true') parser.add_argument('--ftp', dest='ftp', action='store_true') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_firstboot(rule): ''' Parse the firstboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--enable', '--enabled', dest='enable', action='store_true') parser.add_argument('--disable', '--disabled', dest='disable', action='store_true') parser.add_argument('--reconfig', dest='reconfig', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_group(rule): ''' Parse the group line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gid', dest='gid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_harddrive(rule): ''' Parse the harddrive line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--biospart', dest='biospart', action='store') parser.add_argument('--partition', dest='partition', action='store') parser.add_argument('--dir', dest='dir', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_ignoredisk(rule): ''' Parse the ignoredisk line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--drives', dest='drives', action='store') parser.add_argument('--only-use', dest='only-use', action='store') parser.add_argument('--interactive', dest='interactive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsi(rule): ''' Parse the iscsi line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--ipaddr', dest='ipaddr', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--target', dest='target', action='store') parser.add_argument('--iface', 
dest='iface', action='store') parser.add_argument('--user', dest='user', action='store') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--reverse-user', dest='reverse-user', action='store') parser.add_argument('--reverse-password', dest='reverse-password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_iscsiname(rule): ''' Parse the iscsiname line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) #parser.add_argument('iqn') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_keyboard(rule): ''' Parse the keyboard line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--vckeymap', dest='vckeymap', action='store') parser.add_argument('--xlayouts', dest='xlayouts', action='store') parser.add_argument('--switch', dest='switch', action='store') parser.add_argument('keyboard') args = clean_args(vars(parser.parse_args(rules))) if 'keyboard' in args and 'xlayouts' not in args: args['xlayouts'] = args['keyboard'] parser = None return args def parse_lang(rule): ''' Parse the lang line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('lang') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logvol(rule): ''' Parse the logvol line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--recommended', 
dest='recommended', action='store_true') parser.add_argument('--percent', dest='percent', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store_true') parser.add_argument('--name', dest='name', action='store') parser.add_argument('--vgname', dest='vgname', action='store') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--label', dest='label', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_logging(rule): ''' Parse the logging line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--level', dest='level', action='store', choices=['debug', 'info', 'warning', 'error', 'critical']) args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_monitor(rule): ''' Parse the monitor line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--hsync', dest='hsync', action='store') parser.add_argument('--monitor', dest='monitor', action='store') parser.add_argument('--noprobe', dest='noprobe', action='store_true') parser.add_argument('--vsync', dest='vsync', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_multipath(rule): ''' Parse the multipath line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--rule', dest='rule', action='store') args = 
clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_network(rule): ''' Parse the network line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--bootproto', dest='bootproto', action='store', choices=['dhcp', 'bootp', 'static', 'ibft']) parser.add_argument('--device', dest='device', action='store') parser.add_argument('--ip', dest='ip', action='store') parser.add_argument('--ipv6', dest='ipv6', action='store') parser.add_argument('--gateway', dest='gateway', action='store') parser.add_argument('--nodefroute', dest='nodefroute', action='store_true') parser.add_argument('--nameserver', dest='nameserver', action='store') parser.add_argument('--nodns', dest='nodns', action='store_true') parser.add_argument('--netmask', dest='netmask', action='store') parser.add_argument('--hostname', dest='hostname', action='store') parser.add_argument('--ethtool', dest='ethtool', action='store') parser.add_argument('--essid', dest='essid', action='store') parser.add_argument('--wepkey', dest='wepkey', action='store') parser.add_argument('--wpakey', dest='wpakey', action='store') parser.add_argument('--onboot', dest='onboot', action='store') parser.add_argument('--dhcpclass', dest='dhcpclass', action='store') parser.add_argument('--mtu', dest='mtu', action='store') parser.add_argument('--noipv4', dest='noipv4', action='store_true') parser.add_argument('--noipv6', dest='noipv6', action='store_true') parser.add_argument('--activate', dest='activate', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_nfs(rule): ''' Parse the nfs line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--server', dest='server', action='store') parser.add_argument('--dir', dest='dir', action='store') parser.add_argument('--opts', dest='opts', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return 
args def parse_partition(rule): ''' Parse the partition line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('mntpoint') parser.add_argument('--size', dest='size', action='store') parser.add_argument('--grow', dest='grow', action='store_true') parser.add_argument('--maxsize', dest='maxsize', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--onpart', '--usepart', dest='onpart', action='store') parser.add_argument('--ondisk', '--ondrive', dest='ondisk', action='store') parser.add_argument('--asprimary', dest='asprimary', action='store_true') parser.add_argument('--fsprofile', dest='fsprofile', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--recommended', dest='recommended', action='store_true') parser.add_argument('--onbiosdisk', dest='onbiosdisk', action='store') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backupphrase', dest='backupphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_raid(rule): ''' Parse the raid line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('mntpoint') parser.add_argument('--level', dest='level', action='store') parser.add_argument('--device', dest='device', action='store') parser.add_argument('--spares', 
dest='spares', action='store') parser.add_argument('--fstype', dest='fstype', action='store') parser.add_argument('--fsoptions', dest='fsoptions', action='store') parser.add_argument('--label', dest='label', action='store') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--encrypted', dest='encrypted', action='store_true') parser.add_argument('--passphrase', dest='passphrase', action='store') parser.add_argument('--escrowcert', dest='escrowcert', action='store') parser.add_argument('--backuppassphrase', dest='backuppassphrase', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_reboot(rule): ''' Parse the reboot line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--eject', dest='eject', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_repo(rule): ''' Parse the repo line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--baseurl', dest='baseurl', action='store') parser.add_argument('--mirrorlist', dest='mirrorlist', action='store') parser.add_argument('--cost', dest='cost', action='store') parser.add_argument('--excludepkgs', dest='excludepkgs', action='store') parser.add_argument('--includepkgs', dest='includepkgs', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--ignoregroups', dest='ignoregroups', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rescue(rule): ''' Parse the rescue line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) 
rules.pop(0) parser.add_argument('--nomount', dest='nomount', action='store_true') parser.add_argument('--romount', dest='romount', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_rootpw(rule): ''' Parse the rootpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('password') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_selinux(rule): ''' Parse the selinux line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store_true') parser.add_argument('--enforcing', dest='enforcing', action='store_true') parser.add_argument('--permissive', dest='permissive', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_services(rule): ''' Parse the services line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--disabled', dest='disabled', action='store') parser.add_argument('--enabled', dest='enabled', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_sshpw(rule): ''' Parse the sshpw line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--username', dest='username', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--lock', dest='lock', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_timezone(rule): ''' Parse the timezone line ''' parser = 
argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--utc', dest='utc', action='store_true') parser.add_argument('--nontp', dest='nontp', action='store_true') parser.add_argument('--ntpservers', dest='ntpservers', action='store') parser.add_argument('--isUtc', dest='isutc', action='store_true') parser.add_argument('timezone') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_updates(rule): ''' Parse the updates line ''' rules = shlex.split(rule) rules.pop(0) return {'url': rules[0]} if rules else True def parse_upgrade(rule): ''' Parse the upgrade line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--root-device', dest='root-device', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None if args: return args return True def parse_url(rule): ''' Parse the url line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--url', dest='url', action='store') parser.add_argument('--proxy', dest='proxy', action='store') parser.add_argument('--noverifyssl', dest='noverifyssl', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_user(rule): ''' Parse the user line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--name', dest='name', action='store') parser.add_argument('--gecos', dest='gecos', action='store') parser.add_argument('--groups', dest='groups', action='store') parser.add_argument('--homedir', dest='homedir', action='store') parser.add_argument('--lock', dest='lock', action='store_true') parser.add_argument('--password', dest='password', action='store') parser.add_argument('--iscrypted', dest='iscrypted', action='store_true') parser.add_argument('--plaintext', dest='plaintext', action='store_true') parser.add_argument('--shell', dest='shell', action='store') 
parser.add_argument('--uid', dest='uid', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_vnc(rule): ''' Parse the vnc line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--host', dest='host', action='store') parser.add_argument('--port', dest='port', action='store') parser.add_argument('--password', dest='password', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_volgroup(rule): ''' Parse the volgroup line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) partitions = [] newrules = [] for count in range(0, len(rules)): if count == 0: newrules.append(rules[count]) continue elif rules[count].startswith('--'): newrules.append(rules[count]) continue else: partitions.append(rules[count]) rules = newrules parser.add_argument('name') parser.add_argument('--noformat', dest='noformat', action='store_true') parser.add_argument('--useexisting', dest='useexisting', action='store_true') parser.add_argument('--pesize', dest='pesize', action='store') parser.add_argument('--reserved-space', dest='reserved-space', action='store') parser.add_argument('--reserved-percent', dest='reserved-percent', action='store') args = clean_args(vars(parser.parse_args(rules))) if partitions: args['partitions'] = partitions parser = None return args def parse_xconfig(rule): ''' Parse the xconfig line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--defaultdesktop', dest='defaultdesktop', action='store') parser.add_argument('--startxonboot', dest='startxonboot', action='store_true') args = clean_args(vars(parser.parse_args(rules))) parser = None return args def parse_zfcp(rule): ''' Parse the zfcp line ''' parser = argparse.ArgumentParser() rules = shlex.split(rule) rules.pop(0) parser.add_argument('--devnum', dest='devnum', action='store') 
parser.add_argument('--fcplun', dest='fcplun', action='store') parser.add_argument('--wwpn', dest='wwpn', action='store') args = clean_args(vars(parser.parse_args(rules))) parser = None return args
saltstack/salt
salt/modules/mac_power.py
_validate_sleep
python
def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg)
Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L33-L72
null
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. 
:return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. 
:return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. 
:param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_sleep
python
def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state)
Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L96-L125
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def _validate_sleep(minutes):\n '''\n Helper function that validates the minutes parameter. Can be any number\n between 1 and 180. Can also be the string values \"Never\" and \"Off\".\n\n Because \"On\" and \"Off\" get converted to boolean values on the command line\n it will error if \"On\" is passed\n\n Returns: The value to be passed to the command\n '''\n # Must be a value between 1 and 180 or Never/Off\n if isinstance(minutes, six.string_types):\n if minutes.lower() in ['never', 'off']:\n return 'Never'\n else:\n msg = 'Invalid String Value for Minutes.\\n' \\\n 'String values must be \"Never\" or \"Off\".\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n elif isinstance(minutes, bool):\n if minutes:\n msg = 'Invalid Boolean Value for Minutes.\\n' \\\n 'Boolean value \"On\" or \"True\" is not allowed.\\n' \\\n 'Salt CLI converts \"On\" to boolean True.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n return 'Never'\n elif isinstance(minutes, int):\n if minutes in range(1, 181):\n return minutes\n else:\n msg = 'Invalid Integer Value for Minutes.\\n' \\\n 'Integer values must be between 1 and 180.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n msg = 'Unknown Variable Type Passed for Minutes.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n", "def 
confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. 
Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_computer_sleep
python
def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret)
Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L128-L143
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. 
:return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_computer_sleep
python
def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, )
Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L146-L171
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def _validate_sleep(minutes):\n '''\n Helper function that validates the minutes parameter. Can be any number\n between 1 and 180. Can also be the string values \"Never\" and \"Off\".\n\n Because \"On\" and \"Off\" get converted to boolean values on the command line\n it will error if \"On\" is passed\n\n Returns: The value to be passed to the command\n '''\n # Must be a value between 1 and 180 or Never/Off\n if isinstance(minutes, six.string_types):\n if minutes.lower() in ['never', 'off']:\n return 'Never'\n else:\n msg = 'Invalid String Value for Minutes.\\n' \\\n 'String values must be \"Never\" or \"Off\".\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n elif isinstance(minutes, bool):\n if minutes:\n msg = 'Invalid Boolean Value for Minutes.\\n' \\\n 'Boolean value \"On\" or \"True\" is not allowed.\\n' \\\n 'Salt CLI converts \"On\" to boolean True.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n return 'Never'\n elif isinstance(minutes, int):\n if minutes in range(1, 181):\n return minutes\n else:\n msg = 'Invalid Integer Value for Minutes.\\n' \\\n 'Integer values must be between 1 and 180.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n msg = 'Unknown Variable Type Passed for Minutes.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n", "def 
confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. 
:param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. 
:param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_display_sleep
python
def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret)
Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L174-L189
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_display_sleep
python
def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, )
Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L192-L217
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def _validate_sleep(minutes):\n '''\n Helper function that validates the minutes parameter. Can be any number\n between 1 and 180. Can also be the string values \"Never\" and \"Off\".\n\n Because \"On\" and \"Off\" get converted to boolean values on the command line\n it will error if \"On\" is passed\n\n Returns: The value to be passed to the command\n '''\n # Must be a value between 1 and 180 or Never/Off\n if isinstance(minutes, six.string_types):\n if minutes.lower() in ['never', 'off']:\n return 'Never'\n else:\n msg = 'Invalid String Value for Minutes.\\n' \\\n 'String values must be \"Never\" or \"Off\".\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n elif isinstance(minutes, bool):\n if minutes:\n msg = 'Invalid Boolean Value for Minutes.\\n' \\\n 'Boolean value \"On\" or \"True\" is not allowed.\\n' \\\n 'Salt CLI converts \"On\" to boolean True.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n return 'Never'\n elif isinstance(minutes, int):\n if minutes in range(1, 181):\n return minutes\n else:\n msg = 'Invalid Integer Value for Minutes.\\n' \\\n 'Integer values must be between 1 and 180.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n msg = 'Unknown Variable Type Passed for Minutes.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n", "def 
confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. 
code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. 
This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_harddisk_sleep
python
def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret)
Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L220-L235
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_harddisk_sleep
python
def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, )
Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L238-L263
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def _validate_sleep(minutes):\n '''\n Helper function that validates the minutes parameter. Can be any number\n between 1 and 180. Can also be the string values \"Never\" and \"Off\".\n\n Because \"On\" and \"Off\" get converted to boolean values on the command line\n it will error if \"On\" is passed\n\n Returns: The value to be passed to the command\n '''\n # Must be a value between 1 and 180 or Never/Off\n if isinstance(minutes, six.string_types):\n if minutes.lower() in ['never', 'off']:\n return 'Never'\n else:\n msg = 'Invalid String Value for Minutes.\\n' \\\n 'String values must be \"Never\" or \"Off\".\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n elif isinstance(minutes, bool):\n if minutes:\n msg = 'Invalid Boolean Value for Minutes.\\n' \\\n 'Boolean value \"On\" or \"True\" is not allowed.\\n' \\\n 'Salt CLI converts \"On\" to boolean True.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n return 'Never'\n elif isinstance(minutes, int):\n if minutes in range(1, 181):\n return minutes\n else:\n msg = 'Invalid Integer Value for Minutes.\\n' \\\n 'Integer values must be between 1 and 180.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n else:\n msg = 'Unknown Variable Type Passed for Minutes.\\n' \\\n 'Passed: {0}'.format(minutes)\n raise SaltInvocationError(msg)\n", "def 
confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. 
code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. 
This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_wake_on_modem
python
def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L266-L282
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. 
This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_wake_on_modem
python
def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, )
Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L285-L310
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. 
This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_wake_on_network
python
def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L313-L329
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. 
This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_wake_on_network
python
def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, )
Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L332-L357
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. 
The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_restart_power_failure
python
def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L360-L376
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_restart_power_failure
python
def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, )
Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L379-L404
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_restart_freeze
python
def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_freeze
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L407-L423
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. 
:param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_restart_freeze
python
def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True )
Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L426-L454
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
get_sleep_on_power_button
python
def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L457-L476
[ "def execute_return_result(cmd):\n '''\n Executes the passed command. Returns the standard out if successful\n\n :param str cmd: The command to run\n\n :return: The standard out of the command if successful, otherwise returns\n an error\n :rtype: str\n\n :raises: Error if command fails or is not supported\n '''\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return ret['stdout']\n", "def parse_return(data):\n '''\n Returns the data portion of a string that is colon separated.\n\n :param str data: The string that contains the data to be parsed. Usually the\n standard out from a command\n\n For example:\n ``Time Zone: America/Denver``\n will return:\n ``America/Denver``\n '''\n\n if ': ' in data:\n return data.split(': ')[1]\n if ':\\n' in data:\n return data.split(':\\n')[1]\n else:\n return data\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
saltstack/salt
salt/modules/mac_power.py
set_sleep_on_power_button
python
def set_sleep_on_power_button(enabled): ''' Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setallowpowerbuttontosleepcomputer {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_sleep_on_power_button, )
Set whether or not the power button can sleep the computer. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep_on_power_button True
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_power.py#L479-L503
[ "def execute_return_success(cmd):\n '''\n Executes the passed command. Returns True if successful\n\n :param str cmd: The command to run\n\n :return: True if successful, otherwise False\n :rtype: bool\n\n :raises: Error if command fails or is not supported\n '''\n\n ret = _run_all(cmd)\n\n if ret['retcode'] != 0 or 'not supported' in ret['stdout'].lower():\n msg = 'Command Failed: {0}\\n'.format(cmd)\n msg += 'Return Code: {0}\\n'.format(ret['retcode'])\n msg += 'Output: {0}\\n'.format(ret['stdout'])\n msg += 'Error: {0}\\n'.format(ret['stderr'])\n raise CommandExecutionError(msg)\n\n return True\n", "def confirm_updated(value, check_fun, normalize_ret=False, wait=5):\n '''\n Wait up to ``wait`` seconds for a system parameter to be changed before\n deciding it hasn't changed.\n\n :param str value: The value indicating a successful change\n\n :param function check_fun: The function whose return is compared with\n ``value``\n\n :param bool normalize_ret: Whether to normalize the return from\n ``check_fun`` with ``validate_enabled``\n\n :param int wait: The maximum amount of seconds to wait for a system\n parameter to change\n '''\n for i in range(wait):\n state = validate_enabled(check_fun()) if normalize_ret else check_fun()\n if value in state:\n return True\n time.sleep(1)\n return False\n", "def validate_enabled(enabled):\n '''\n Helper function to validate the enabled parameter. Boolean values are\n converted to \"on\" and \"off\". String values are checked to make sure they are\n either \"on\" or \"off\"/\"yes\" or \"no\". Integer ``0`` will return \"off\". 
All\n other integers will return \"on\"\n\n :param enabled: Enabled can be boolean True or False, Integers, or string\n values \"on\" and \"off\"/\"yes\" and \"no\".\n :type: str, int, bool\n\n :return: \"on\" or \"off\" or errors\n :rtype: str\n '''\n if isinstance(enabled, six.string_types):\n if enabled.lower() not in ['on', 'off', 'yes', 'no']:\n msg = '\\nMac Power: Invalid String Value for Enabled.\\n' \\\n 'String values must be \\'on\\' or \\'off\\'/\\'yes\\' or \\'no\\'.\\n' \\\n 'Passed: {0}'.format(enabled)\n raise SaltInvocationError(msg)\n\n return 'on' if enabled.lower() in ['on', 'yes'] else 'off'\n\n return 'on' if bool(enabled) else 'off'\n" ]
# -*- coding: utf-8 -*- ''' Module for editing power settings on macOS .. versionadded:: 2016.3.0 ''' # Import python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.mac_utils import salt.utils.platform from salt.exceptions import SaltInvocationError # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import range __virtualname__ = 'power' def __virtual__(): ''' Only for macOS ''' if not salt.utils.platform.is_darwin(): return (False, 'The mac_power module could not be loaded: ' 'module only works on macOS systems.') return __virtualname__ def _validate_sleep(minutes): ''' Helper function that validates the minutes parameter. Can be any number between 1 and 180. Can also be the string values "Never" and "Off". Because "On" and "Off" get converted to boolean values on the command line it will error if "On" is passed Returns: The value to be passed to the command ''' # Must be a value between 1 and 180 or Never/Off if isinstance(minutes, six.string_types): if minutes.lower() in ['never', 'off']: return 'Never' else: msg = 'Invalid String Value for Minutes.\n' \ 'String values must be "Never" or "Off".\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) elif isinstance(minutes, bool): if minutes: msg = 'Invalid Boolean Value for Minutes.\n' \ 'Boolean value "On" or "True" is not allowed.\n' \ 'Salt CLI converts "On" to boolean True.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: return 'Never' elif isinstance(minutes, int): if minutes in range(1, 181): return minutes else: msg = 'Invalid Integer Value for Minutes.\n' \ 'Integer values must be between 1 and 180.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) else: msg = 'Unknown Variable Type Passed for Minutes.\n' \ 'Passed: {0}'.format(minutes) raise SaltInvocationError(msg) def get_sleep(): ''' Displays the amount of idle time until the machine sleeps. 
Settings for Computer, Display, and Hard Disk are displayed. :return: A dictionary containing the sleep status for Computer, Display, and Hard Disk :rtype: dict CLI Example: .. code-block:: bash salt '*' power.get_sleep ''' return {'Computer': get_computer_sleep(), 'Display': get_display_sleep(), 'Hard Disk': get_harddisk_sleep()} def set_sleep(minutes): ''' Sets the amount of idle time until the machine sleeps. Sets the same value for Computer, Display, and Hard Disk. Pass "Never" or "Off" for computers that should never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_sleep 120 salt '*' power.set_sleep never ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setsleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) state = [] for check in (get_computer_sleep, get_display_sleep, get_harddisk_sleep): state.append(salt.utils.mac_utils.confirm_updated( value, check, )) return all(state) def get_computer_sleep(): ''' Display the amount of idle time until the computer sleeps. :return: A string representing the sleep settings for the computer :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_computer_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getcomputersleep') return salt.utils.mac_utils.parse_return(ret) def set_computer_sleep(minutes): ''' Set the amount of idle time until the computer sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_computer_sleep 120 salt '*' power.set_computer_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setcomputersleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_computer_sleep, ) def get_display_sleep(): ''' Display the amount of idle time until the display sleeps. :return: A string representing the sleep settings for the displey :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_display_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getdisplaysleep') return salt.utils.mac_utils.parse_return(ret) def set_display_sleep(minutes): ''' Set the amount of idle time until the display sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_display_sleep 120 salt '*' power.set_display_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setdisplaysleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_display_sleep, ) def get_harddisk_sleep(): ''' Display the amount of idle time until the hard disk sleeps. :return: A string representing the sleep settings for the hard disk :rtype: str CLI Example: ..code-block:: bash salt '*' power.get_harddisk_sleep ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getharddisksleep') return salt.utils.mac_utils.parse_return(ret) def set_harddisk_sleep(minutes): ''' Set the amount of idle time until the harddisk sleeps. Pass "Never" of "Off" to never sleep. :param minutes: Can be an integer between 1 and 180 or "Never" or "Off" :ptype: int, str :return: True if successful, False if not :rtype: bool CLI Example: .. 
code-block:: bash salt '*' power.set_harddisk_sleep 120 salt '*' power.set_harddisk_sleep off ''' value = _validate_sleep(minutes) cmd = 'systemsetup -setharddisksleep {0}'.format(value) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( str(value), get_harddisk_sleep, ) def get_wake_on_modem(): ''' Displays whether 'wake on modem' is on or off if supported :return: A string value representing the "wake on modem" settings :rtype: str CLI Example: .. code-block:: bash salt '*' power.get_wake_on_modem ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonmodem') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_modem(enabled): ''' Set whether or not the computer will wake from sleep when modem activity is detected. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_modem True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonmodem {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_modem, ) def get_wake_on_network(): ''' Displays whether 'wake on network' is on or off if supported :return: A string value representing the "wake on network" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_wake_on_network ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getwakeonnetworkaccess') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_wake_on_network(enabled): ''' Set whether or not the computer will wake from sleep when network activity is detected. :param bool enabled: True to enable, False to disable. 
"On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_wake_on_network True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setwakeonnetworkaccess {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_wake_on_network, ) def get_restart_power_failure(): ''' Displays whether 'restart on power failure' is on or off if supported :return: A string value representing the "restart on power failure" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_restart_power_failure ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartpowerfailure') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, ) def get_restart_freeze(): ''' Displays whether 'restart on freeze' is on or off if supported :return: A string value representing the "restart on freeze" settings :rtype: string CLI Example: .. 
code-block:: bash salt '*' power.get_restart_freeze ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getrestartfreeze') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on' def set_restart_freeze(enabled): ''' Specifies whether the server restarts automatically after a system freeze. This setting doesn't seem to be editable. The command completes successfully but the setting isn't actually updated. This is probably a macOS. The functions remains in case they ever fix the bug. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_freeze True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartfreeze {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_freeze, True ) def get_sleep_on_power_button(): ''' Displays whether 'allow power button to sleep computer' is on or off if supported :return: A string value representing the "allow power button to sleep computer" settings :rtype: string CLI Example: .. code-block:: bash salt '*' power.get_sleep_on_power_button ''' ret = salt.utils.mac_utils.execute_return_result( 'systemsetup -getallowpowerbuttontosleepcomputer') return salt.utils.mac_utils.validate_enabled( salt.utils.mac_utils.parse_return(ret)) == 'on'
saltstack/salt
salt/cloud/clouds/pyrax.py
get_conn
python
def get_conn(conn_type): ''' Return a conn object for the passed VM data ''' vm_ = get_configured_provider() kwargs = vm_.copy() # pylint: disable=E1103 kwargs['username'] = vm_['username'] kwargs['auth_endpoint'] = vm_.get('identity_url', None) kwargs['region'] = vm_['compute_region'] conn = getattr(suop, conn_type) return conn(**kwargs)
Return a conn object for the passed VM data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/pyrax.py#L63-L77
[ "def get_configured_provider():\n '''\n Return the first configured instance.\n '''\n return config.is_provider_configured(\n __opts__,\n __active_provider_name__ or __virtualname__,\n ('username', 'identity_url', 'compute_region',)\n )\n" ]
# -*- coding: utf-8 -*- ''' Pyrax Cloud Module ================== PLEASE NOTE: This module is currently in early development, and considered to be experimental and unstable. It is not recommended for production use. Unless you are actively developing code in this module, you should use the OpenStack module instead. ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals # Import salt libs import salt.utils.data import salt.config as config # Import pyrax libraries # This is typically against SaltStack coding styles, # it should be 'import salt.utils.openstack.pyrax as suop'. Something # in the loader is creating a name clash and making that form fail from salt.utils.openstack import pyrax as suop __virtualname__ = 'pyrax' # Only load in this module is the PYRAX configurations are in place def __virtual__(): ''' Check for Pyrax configurations ''' if get_configured_provider() is False: return False if get_dependencies() is False: return False return __virtualname__ def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( __opts__, __active_provider_name__ or __virtualname__, ('username', 'identity_url', 'compute_region',) ) def get_dependencies(): ''' Warn if dependencies aren't met. 
''' return config.check_driver_dependencies( __virtualname__, {'pyrax': suop.HAS_PYRAX} ) def queues_exists(call, kwargs): conn = get_conn('RackspaceQueues') return conn.exists(kwargs['name']) def queues_show(call, kwargs): conn = get_conn('RackspaceQueues') return salt.utils.data.simple_types_filter(conn.show(kwargs['name']).__dict__) def queues_create(call, kwargs): conn = get_conn('RackspaceQueues') if conn.create(kwargs['name']): return salt.utils.data.simple_types_filter(conn.show(kwargs['name']).__dict__) else: return {} def queues_delete(call, kwargs): conn = get_conn('RackspaceQueues') if conn.delete(kwargs['name']): return {} else: return salt.utils.data.simple_types_filter(conn.show(kwargs['name'].__dict__))
saltstack/salt
salt/modules/swarm.py
swarm_init
python
def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return
Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L72-L112
[ "def swarm_tokens():\n '''\n Get the Docker Swarm Manager or Worker join tokens\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' swarm.swarm_tokens\n '''\n client = docker.APIClient(base_url='unix://var/run/docker.sock')\n service = client.inspect_swarm()\n return service['JoinTokens']\n" ]
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. 
code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. 
code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
joinswarm
python
def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return
Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L115-L150
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. 
code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
leave_swarm
python
def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return
Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. code-block:: bash salt '*' swarm.leave_swarm force=False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L153-L170
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. 
code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. 
code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
service_create
python
def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int):
    '''
    Create Docker Swarm Service Create

    image
        The docker image

    name
        Is the service name

    command
        The docker command to run in the container at launch

    hostname
        The hostname of the containers

    replicas
        How many replicas you want running in the swarm

    target_port
        The target port on the container

    published_port
        port thats published on the host/os

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.service_create image=httpd name=Test_Service \
            command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
    '''
    try:
        salt_return = {}
        replica_mode = docker.types.ServiceMode('replicated', replicas=replicas)
        ports = docker.types.EndpointSpec(ports={target_port: published_port})
        # Pass ``hostname`` through to the Docker SDK: previously it was
        # accepted and echoed back in the return data but never actually
        # applied to the created service.
        __context__['client'].services.create(name=name,
                                              image=image,
                                              command=command,
                                              hostname=hostname,
                                              mode=replica_mode,
                                              endpoint_spec=ports)
        echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name
        salt_return.update({'Info': echoback,
                            'Minion': __context__['server_name'],
                            'Name': name,
                            'Image': image,
                            'Command': command,
                            'Hostname': hostname,
                            'Replicas': replicas,
                            'Target_Port': target_port,
                            'Published_Port': published_port})
    except TypeError:
        # A missing/invalid argument shows up as a TypeError from the SDK;
        # report a usage hint instead of a traceback.
        salt_return = {}
        salt_return.update({'Error': 'Please make sure you are passing arguments correctly '
                                     '[image, name, command, hostname, replicas, target_port and published_port]'})
    return salt_return
Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L173-L234
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
swarm_service_info
python
def swarm_service_info(service_name=str):
    '''
    Swarm Service Information

    service_name
        The name of the service that you want information on about the service

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.swarm_service_info service_name=Test_Service
    '''
    try:
        salt_return = {}
        client = docker.APIClient(base_url='unix://var/run/docker.sock')
        service = client.inspect_service(service=service_name)
        # Round-trip through JSON to get plain dicts/lists out of the SDK response.
        getdata = salt.utils.json.dumps(service)
        dump = salt.utils.json.loads(getdata)
        version = dump['Version']['Index']
        name = dump['Spec']['Name']
        network_mode = dump['Spec']['EndpointSpec']['Mode']
        ports = dump['Spec']['EndpointSpec']['Ports']
        swarm_id = dump['ID']
        create_date = dump['CreatedAt']
        update_date = dump['UpdatedAt']
        replicas = dump['Spec']['Mode']['Replicated']['Replicas']
        network = dump['Endpoint']['VirtualIPs']
        image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image']
        # NOTE: each iteration overwrites the port-related keys, so only the
        # last published port is reflected in the return data.
        for items in ports:
            published_port = items['PublishedPort']
            target_port = items['TargetPort']
            published_mode = items['PublishMode']
            protocol = items['Protocol']
            salt_return.update({'Service Name': name,
                                'Replicas': replicas,
                                'Service ID': swarm_id,
                                'Network': network,
                                'Network Mode': network_mode,
                                'Creation Date': create_date,
                                'Update Date': update_date,
                                'Published Port': published_port,
                                'Target Port': target_port,
                                'Published Mode': published_mode,
                                'Protocol': protocol,
                                'Docker Image': image,
                                'Minion Id': __context__['server_name'],
                                'Version': version})
    except TypeError:
        salt_return = {}
        salt_return.update({'Error': 'service_name arg is missing?'})
    return salt_return
Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L237-L289
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
remove_service
python
def remove_service(service=str):
    '''
    Remove Swarm Service

    service
        The name of the service

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.remove_service service=Test_Service
    '''
    try:
        # Talk to the local Docker daemon over its unix socket.
        api = docker.APIClient(base_url='unix://var/run/docker.sock')
        removed = api.remove_service(service)
        salt_return = {'Service Deleted': removed,
                       'Minion ID': __context__['server_name']}
    except TypeError:
        # Raised when the ``service`` argument was not supplied correctly.
        salt_return = {'Error': 'service arg is missing?'}
    return salt_return
Remove Swarm Service service The name of the service CLI Example: .. code-block:: bash salt '*' swarm.remove_service service=Test_Service
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L292-L314
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. 
code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. 
code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
node_ls
python
def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return
Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L317-L356
[ "def loads(s, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.loads and prevents a traceback in the event that a bytestring is\n passed to the function. (Python < 3.6 cannot load bytestrings)\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n try:\n return json_module.loads(s, **kwargs)\n except TypeError as exc:\n # json.loads cannot load bytestrings in Python < 3.6\n if six.PY3 and isinstance(s, bytes):\n return json_module.loads(salt.utils.stringutils.to_unicode(s), **kwargs)\n else:\n raise exc\n", "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. 
code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
remove_node
python
def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return
Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L359-L386
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. 
code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
saltstack/salt
salt/modules/swarm.py
update_node
python
def update_node(availability=str, node_name=str, role=str, node_id=str, version=int): ''' Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19 ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: salt_return = {} node_spec = {'Availability': availability, 'Name': node_name, 'Role': role} client.update_node(node_id=node_id, version=version, node_spec=node_spec) salt_return.update({'Node Information': node_spec}) except TypeError: salt_return = {} salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'}) return salt_return
Updates docker swarm nodes/needs to target a manager node/minion availability Drain or Active node_name minion/node role role of manager or worker node_id The Id and that can be obtained via swarm.node_ls version Is obtained by swarm.node_ls CLI Example: .. code-block:: bash salt '*' swarm.update_node availability=drain node_name=minion2 \ role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/swarm.py#L389-L432
null
# -*- coding: utf-8 -*- ''' Docker Swarm Module using Docker's Python SDK ============================================= :codeauthor: Tyler Jones <jonestyler806@gmail.com> .. versionadded:: 2018.3.0 The Docker Swarm Module is used to manage and create Docker Swarms. Dependencies ------------ - Docker installed on the host - Docker python sdk >= 2.5.1 Docker Python SDK ----------------- .. code-block:: bash pip install -U docker More information: https://docker-py.readthedocs.io/en/stable/ ''' # Import python libraries from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json try: import docker HAS_DOCKER = True except ImportError: HAS_DOCKER = False __virtualname__ = 'swarm' def __virtual__(): ''' Load this module if the docker python module is installed ''' if HAS_DOCKER: return __virtualname__ return False, 'The swarm module failed to load: Docker python module is not available.' def __init__(self): if HAS_DOCKER: __context__['client'] = docker.from_env() __context__['server_name'] = __grains__['id'] def swarm_tokens(): ''' Get the Docker Swarm Manager or Worker join tokens CLI Example: .. code-block:: bash salt '*' swarm.swarm_tokens ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_swarm() return service['JoinTokens'] def swarm_init(advertise_addr=str, listen_addr=int, force_new_cluster=bool): ''' Initalize Docker on Minion as a Swarm Manager advertise_addr The ip of the manager listen_addr Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567 force_new_cluster Force a new cluster if True is passed CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_init advertise_addr='192.168.50.10' listen_addr='0.0.0.0' force_new_cluster=False ''' try: salt_return = {} __context__['client'].swarm.init(advertise_addr, listen_addr, force_new_cluster) output = 'Docker swarm has been initialized on {0} ' \ 'and the worker/manager Join token is below'.format(__context__['server_name']) salt_return.update({'Comment': output, 'Tokens': swarm_tokens()}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing advertise_addr, ' 'listen_addr and force_new_cluster correctly.'}) return salt_return def joinswarm(remote_addr=int, listen_addr=int, token=str): ''' Join a Swarm Worker to the cluster remote_addr The manager node you want to connect to for the swarm listen_addr Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP) token Either the manager join token or the worker join token. You can get the worker or manager token via ``salt '*' swarm.swarm_tokens`` CLI Example: .. code-block:: bash salt '*' swarm.joinswarm remote_addr=192.168.50.10 listen_addr='0.0.0.0' \ token='SWMTKN-1-64tux2g0701r84ofq93zppcih0pe081akq45owe9ts61f30x4t-06trjugdu7x2z47j938s54il' ''' try: salt_return = {} __context__['client'].swarm.join(remote_addrs=[remote_addr], listen_addr=listen_addr, join_token=token) output = __context__['server_name'] + ' has joined the Swarm' salt_return.update({'Comment': output, 'Manager_Addr': remote_addr}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure this minion is not part of a swarm and you are ' 'passing remote_addr, listen_addr and token correctly.'}) return salt_return def leave_swarm(force=bool): ''' Force the minion to leave the swarm force Will force the minion/worker/manager to leave the swarm CLI Example: .. 
code-block:: bash salt '*' swarm.leave_swarm force=False ''' salt_return = {} __context__['client'].swarm.leave(force=force) output = __context__['server_name'] + ' has left the swarm' salt_return.update({'Comment': output}) return salt_return def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int): ''' Create Docker Swarm Service Create image The docker image name Is the service name command The docker command to run in the container at launch hostname The hostname of the containers replicas How many replicas you want running in the swarm target_port The target port on the container published_port port thats published on the host/os CLI Example: .. code-block:: bash salt '*' swarm.service_create image=httpd name=Test_Service \ command=None hostname=salthttpd replicas=6 target_port=80 published_port=80 ''' try: salt_return = {} replica_mode = docker.types.ServiceMode('replicated', replicas=replicas) ports = docker.types.EndpointSpec(ports={target_port: published_port}) __context__['client'].services.create(name=name, image=image, command=command, mode=replica_mode, endpoint_spec=ports) echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name salt_return.update({'Info': echoback, 'Minion': __context__['server_name'], 'Name': name, 'Image': image, 'Command': command, 'Hostname': hostname, 'Replicas': replicas, 'Target_Port': target_port, 'Published_Port': published_port}) except TypeError: salt_return = {} salt_return.update({'Error': 'Please make sure you are passing arguments correctly ' '[image, name, command, hostname, replicas, target_port and published_port]'}) return salt_return def swarm_service_info(service_name=str): ''' Swarm Service Information service_name The name of the service that you want information on about the service CLI Example: .. 
code-block:: bash salt '*' swarm.swarm_service_info service_name=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.inspect_service(service=service_name) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) version = dump['Version']['Index'] name = dump['Spec']['Name'] network_mode = dump['Spec']['EndpointSpec']['Mode'] ports = dump['Spec']['EndpointSpec']['Ports'] swarm_id = dump['ID'] create_date = dump['CreatedAt'] update_date = dump['UpdatedAt'] labels = dump['Spec']['Labels'] replicas = dump['Spec']['Mode']['Replicated']['Replicas'] network = dump['Endpoint']['VirtualIPs'] image = dump['Spec']['TaskTemplate']['ContainerSpec']['Image'] for items in ports: published_port = items['PublishedPort'] target_port = items['TargetPort'] published_mode = items['PublishMode'] protocol = items['Protocol'] salt_return.update({'Service Name': name, 'Replicas': replicas, 'Service ID': swarm_id, 'Network': network, 'Network Mode': network_mode, 'Creation Date': create_date, 'Update Date': update_date, 'Published Port': published_port, 'Target Port': target_port, 'Published Mode': published_mode, 'Protocol': protocol, 'Docker Image': image, 'Minion Id': __context__['server_name'], 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'service_name arg is missing?'}) return salt_return def remove_service(service=str): ''' Remove Swarm Service service The name of the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_service service=Test_Service ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.remove_service(service) salt_return.update({'Service Deleted': service, 'Minion ID': __context__['server_name']}) except TypeError: salt_return = {} salt_return.update({'Error': 'service arg is missing?'}) return salt_return def node_ls(server=str): ''' Displays Information about Swarm Nodes with passing in the server server The minion/server name CLI Example: .. code-block:: bash salt '*' swarm.node_ls server=minion1 ''' try: salt_return = {} client = docker.APIClient(base_url='unix://var/run/docker.sock') service = client.nodes(filters=({'name': server})) getdata = salt.utils.json.dumps(service) dump = salt.utils.json.loads(getdata) for items in dump: docker_version = items['Description']['Engine']['EngineVersion'] platform = items['Description']['Platform'] hostnames = items['Description']['Hostname'] ids = items['ID'] role = items['Spec']['Role'] availability = items['Spec']['Availability'] status = items['Status'] version = items['Version']['Index'] salt_return.update({'Docker Version': docker_version, 'Platform': platform, 'Hostname': hostnames, 'ID': ids, 'Roles': role, 'Availability': availability, 'Status': status, 'Version': version}) except TypeError: salt_return = {} salt_return.update({'Error': 'The server arg is missing or you not targeting a Manager node?'}) return salt_return def remove_node(node_id=str, force=bool): ''' Remove a node from a swarm and the target needs to be a swarm manager node_id The node id from the return of swarm.node_ls force Forcefully remove the node/minion from the service CLI Example: .. 
code-block:: bash salt '*' swarm.remove_node node_id=z4gjbe9rwmqahc2a91snvolm5 force=false ''' client = docker.APIClient(base_url='unix://var/run/docker.sock') try: if force == 'True': service = client.remove_node(node_id, force=True) return service else: service = client.remove_node(node_id, force=False) return service except TypeError: salt_return = {} salt_return.update({'Error': 'Is the node_id and/or force=True/False missing?'}) return salt_return
saltstack/salt
salt/states/boto_iam_role.py
present
python
def present( name, policy_document=None, policy_document_from_pillars=None, path=None, policies=None, policies_from_pillars=None, managed_policies=None, create_instance_profile=True, region=None, key=None, keyid=None, profile=None, delete_policies=True): ''' Ensure the IAM role exists. name Name of the IAM role. policy_document The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policy_document_from_pillars A pillar key that contains a role policy document. The statements defined here will be appended with the policy document statements defined in the policy_document argument. .. versionadded:: 2017.7.0 path The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policies A dict of IAM role policies. policies_from_pillars A list of pillars that contain role policy dicts. Policies in the pillars will be merged in the order defined in the list and key conflicts will be handled by later defined keys overriding earlier defined keys. The policies defined here will be merged with the policies defined in the policies argument. If keys conflict, the keys in the policies argument will override the keys defined in policies_from_pillars. managed_policies A list of (AWS or Customer) managed policies to be attached to the role. create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. delete_policies Deletes existing policies that are not in the given list of policies. Default value is ``True``. If ``False`` is specified, existing policies will not be deleted allowing manual modifications on the IAM role to be persistent. .. 
versionadded:: 2015.8.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Build up _policy_document _policy_document = {} if policy_document_from_pillars: from_pillars = __salt__['pillar.get'](policy_document_from_pillars) if from_pillars: _policy_document['Version'] = from_pillars['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(from_pillars['Statement']) if policy_document: _policy_document['Version'] = policy_document['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(policy_document['Statement']) _ret = _role_present(name, _policy_document, path, region, key, keyid, profile) # Build up _policies if not policies: policies = {} if not policies_from_pillars: policies_from_pillars = [] if not managed_policies: managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) _policies.update(_policy) _policies.update(policies) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if create_instance_profile: _ret = _instance_profile_present(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_associated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, _policies, region, key, keyid, profile, delete_policies) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], 
_ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] _ret = _policies_attached(name, managed_policies, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret
Ensure the IAM role exists. name Name of the IAM role. policy_document The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policy_document_from_pillars A pillar key that contains a role policy document. The statements defined here will be appended with the policy document statements defined in the policy_document argument. .. versionadded:: 2017.7.0 path The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policies A dict of IAM role policies. policies_from_pillars A list of pillars that contain role policy dicts. Policies in the pillars will be merged in the order defined in the list and key conflicts will be handled by later defined keys overriding earlier defined keys. The policies defined here will be merged with the policies defined in the policies argument. If keys conflict, the keys in the policies argument will override the keys defined in policies_from_pillars. managed_policies A list of (AWS or Customer) managed policies to be attached to the role. create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. delete_policies Deletes existing policies that are not in the given list of policies. Default value is ``True``. If ``False`` is specified, existing policies will not be deleted allowing manual modifications on the IAM role to be persistent. .. versionadded:: 2015.8.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_iam_role.py#L106-L240
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def _role_present(\n name,\n policy_document=None,\n path=None,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n role = __salt__['boto_iam.describe_role'](name, region, key, keyid,\n 
profile)\n if not role:\n if __opts__['test']:\n ret['comment'] = 'IAM role {0} is set to be created.'.format(name)\n ret['result'] = None\n return ret\n created = __salt__['boto_iam.create_role'](name, policy_document,\n path, region, key,\n keyid, profile)\n if created:\n ret['changes']['old'] = {'role': None}\n ret['changes']['new'] = {'role': name}\n ret['comment'] = 'IAM role {0} created.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to create {0} IAM role.'.format(name)\n else:\n ret['comment'] = '{0} role present.'.format(name)\n if not policy_document:\n _policy_document = __salt__['boto_iam.build_policy'](\n region, key, keyid, profile)\n else:\n _policy_document = policy_document\n if salt.utils.dictdiffer.deep_diff(\n _sort_policy(role['assume_role_policy_document']),\n _sort_policy(_policy_document)):\n if __opts__['test']:\n msg = 'Assume role policy document to be updated.'\n ret['comment'] = '{0} {1}'.format(ret['comment'], msg)\n ret['result'] = None\n return ret\n updated = __salt__['boto_iam.update_assume_role_policy'](\n name, _policy_document, region, key, keyid, profile\n )\n if updated:\n msg = 'Assume role policy document updated.'\n ret['comment'] = '{0} {1}'.format(ret['comment'], msg)\n ret['changes']['old'] = {'policy_document': role['assume_role_policy_document']}\n ret['changes']['new'] = {'policy_document': _policy_document}\n else:\n ret['result'] = False\n msg = 'Failed to update assume role policy.'\n ret['comment'] = '{0} {1}'.format(ret['comment'], msg)\n return ret\n", "def _policies_present(\n name,\n policies=None,\n region=None,\n key=None,\n keyid=None,\n profile=None,\n delete_policies=True):\n ret = {'result': True, 'comment': '', 'changes': {}}\n policies_to_create = {}\n policies_to_delete = []\n for policy_name, policy in six.iteritems(policies):\n _policy = __salt__['boto_iam.get_role_policy'](name, policy_name,\n region, key, keyid,\n profile)\n if _policy != policy:\n 
policies_to_create[policy_name] = policy\n _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid,\n profile)\n for policy_name in _list:\n if delete_policies and policy_name not in policies:\n policies_to_delete.append(policy_name)\n if policies_to_create or policies_to_delete:\n _to_modify = list(policies_to_delete)\n _to_modify.extend(policies_to_create)\n if __opts__['test']:\n ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name)\n ret['result'] = None\n return ret\n ret['changes']['old'] = {'policies': _list}\n for policy_name, policy in six.iteritems(policies_to_create):\n policy_set = __salt__['boto_iam.create_role_policy'](name,\n policy_name,\n policy,\n region, key,\n keyid,\n profile)\n if not policy_set:\n _list = __salt__['boto_iam.list_role_policies'](name, region,\n key, keyid,\n profile)\n ret['changes']['new'] = {'policies': _list}\n ret['result'] = False\n ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)\n return ret\n for policy_name in policies_to_delete:\n policy_unset = __salt__['boto_iam.delete_role_policy'](name,\n policy_name,\n region, key,\n keyid,\n profile)\n if not policy_unset:\n _list = __salt__['boto_iam.list_role_policies'](name, region,\n key, keyid,\n profile)\n ret['changes']['new'] = {'policies': _list}\n ret['result'] = False\n ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name)\n return ret\n _list = __salt__['boto_iam.list_role_policies'](name, region, key,\n keyid, profile)\n ret['changes']['new'] = {'policies': _list}\n ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(_list), name)\n return ret\n", "def _instance_profile_present(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n exists = __salt__['boto_iam.instance_profile_exists'](name, region, key,\n keyid, profile)\n if not exists:\n if 
__opts__['test']:\n ret['comment'] = 'Instance profile {0} is set to be created.'.format(name)\n ret['result'] = None\n return ret\n created = __salt__['boto_iam.create_instance_profile'](name, region,\n key, keyid,\n profile)\n if created:\n ret['changes']['old'] = {'instance_profile': None}\n ret['changes']['new'] = {'instance_profile': name}\n ret['comment'] = 'Instance profile {0} created.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to create {0} instance profile.'.format(name)\n return ret\n", "def _instance_profile_associated(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n is_associated = __salt__['boto_iam.profile_associated'](name, name, region,\n key, keyid,\n profile)\n if not is_associated:\n if __opts__['test']:\n ret['comment'] = 'Instance profile {0} is set to be associated.'.format(name)\n ret['result'] = None\n return ret\n associated = __salt__['boto_iam.associate_profile_to_role'](name, name,\n region,\n key, keyid,\n profile)\n if associated:\n ret['changes']['old'] = {'profile_associated': None}\n ret['changes']['new'] = {'profile_associated': True}\n ret['comment'] = 'Instance profile {0} associated.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to associate {0} instance profile with {0} role.'.format(name)\n return ret\n", "def _policies_attached(\n name,\n managed_policies=None,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n policies_to_attach = []\n policies_to_detach = []\n for policy in managed_policies or []:\n entities = __salt__['boto_iam.list_entities_for_policy'](\n policy,\n entity_filter='Role',\n region=region, key=key, keyid=keyid, profile=profile)\n found = False\n for roledict in entities.get('policy_roles', []):\n if name == roledict.get('role_name'):\n found = True\n break\n if not found:\n policies_to_attach.append(policy)\n 
_list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n oldpolicies = [x.get('policy_arn') for x in _list]\n for policy_data in _list:\n if policy_data.get('policy_name') not in managed_policies \\\n and policy_data.get('policy_arn') not in managed_policies:\n policies_to_detach.append(policy_data.get('policy_arn'))\n if policies_to_attach or policies_to_detach:\n _to_modify = list(policies_to_detach)\n _to_modify.extend(policies_to_attach)\n if __opts__['test']:\n ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name)\n ret['result'] = None\n return ret\n ret['changes']['old'] = {'managed_policies': oldpolicies}\n for policy_name in policies_to_attach:\n policy_set = __salt__['boto_iam.attach_role_policy'](policy_name,\n role_name=name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if not policy_set:\n _list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n newpolicies = [x.get('policy_arn') for x in _list]\n ret['changes']['new'] = {'managed_policies': newpolicies}\n ret['result'] = False\n ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)\n return ret\n for policy_name in policies_to_detach:\n policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name,\n role_name=name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if not policy_unset:\n _list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n newpolicies = [x.get('policy_arn') for x in _list]\n ret['changes']['new'] = {'managed_policies': newpolicies}\n ret['result'] = False\n ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name)\n return ret\n _list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n 
newpolicies = [x.get('policy_arn') for x in _list]\n log.debug(newpolicies)\n ret['changes']['new'] = {'managed_policies': newpolicies}\n ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(newpolicies), name)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage IAM roles ================ .. versionadded:: 2014.7.0 This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit IAM credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml iam.keyid: GKTADJGHEIQSXMKKRBJ08H iam.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 Creating a role will automatically create an instance profile and associate it with the role. This is the default behavior of the AWS console. .. code-block:: yaml myrole: boto_iam_role.present: - region: us-east-1 - key: GKTADJGHEIQSXMKKRBJ08H - keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - policies_from_pillars: - shared_iam_bootstrap_policy - policies: MySQSPolicy: Statement: - Action: - sqs:* Effect: Allow Resource: - arn:aws:sqs:*:*:* Sid: MyPolicySQS1 MyS3Policy: Statement: - Action: - s3:GetObject Effect: Allow Resource: - arn:aws:s3:*:*:mybucket/* # Using a credentials profile from pillars myrole: boto_iam_role.present: - profile: myiamprofile # Passing in a credentials profile myrole: boto_iam_role.present: - profile: key: GKTADJGHEIQSXMKKRBJ08H keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 If ``delete_policies: False`` is specified, existing policies that are not in the given list of policies will not be deleted. 
This allows manual modifications on the IAM role to be persistent. This functionality was added in 2015.8.0. .. note:: When using the ``profile`` parameter and ``region`` is set outside of the profile group, region is ignored and a default region will be used. If ``region`` is missing from the ``profile`` data set, ``us-east-1`` will be used as the default region. ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.dictupdate as dictupdate from salt.utils.odict import OrderedDict import salt.utils.dictdiffer from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_iam_role' if 'boto_iam.role_exists' in __salt__ else False def _role_present( name, policy_document=None, path=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} role = __salt__['boto_iam.describe_role'](name, region, key, keyid, profile) if not role: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_role'](name, policy_document, path, region, key, keyid, profile) if created: ret['changes']['old'] = {'role': None} ret['changes']['new'] = {'role': name} ret['comment'] = 'IAM role {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} IAM role.'.format(name) else: ret['comment'] = '{0} role present.'.format(name) if not policy_document: _policy_document = __salt__['boto_iam.build_policy']( region, key, keyid, profile) else: _policy_document = policy_document if salt.utils.dictdiffer.deep_diff( _sort_policy(role['assume_role_policy_document']), _sort_policy(_policy_document)): if __opts__['test']: msg = 'Assume role policy document to be updated.' 
ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['result'] = None return ret updated = __salt__['boto_iam.update_assume_role_policy']( name, _policy_document, region, key, keyid, profile ) if updated: msg = 'Assume role policy document updated.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['changes']['old'] = {'policy_document': role['assume_role_policy_document']} ret['changes']['new'] = {'policy_document': _policy_document} else: ret['result'] = False msg = 'Failed to update assume role policy.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) return ret def _instance_profile_present( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_instance_profile'](name, region, key, keyid, profile) if created: ret['changes']['old'] = {'instance_profile': None} ret['changes']['new'] = {'instance_profile': name} ret['comment'] = 'Instance profile {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} instance profile.'.format(name) return ret def _instance_profile_associated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if not is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be associated.'.format(name) ret['result'] = None return ret associated = __salt__['boto_iam.associate_profile_to_role'](name, name, region, key, keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': None} ret['changes']['new'] = {'profile_associated': True} ret['comment'] = 'Instance profile {0} 
associated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to associate {0} instance profile with {0} role.'.format(name) return ret def _sort_policy(doc): ''' List-type sub-items in policies don't happen to be order-sensitive, but compare operations will render them unequal, leading to non-idempotent state runs. We'll sort any list-type subitems before comparison to reduce the likelihood of false negatives. ''' if isinstance(doc, list): return sorted([_sort_policy(i) for i in doc]) elif isinstance(doc, (dict, OrderedDict)): return dict([(k, _sort_policy(v)) for k, v in six.iteritems(doc)]) return doc def _policies_present( name, policies=None, region=None, key=None, keyid=None, profile=None, delete_policies=True): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_create = {} policies_to_delete = [] for policy_name, policy in six.iteritems(policies): _policy = __salt__['boto_iam.get_role_policy'](name, policy_name, region, key, keyid, profile) if _policy != policy: policies_to_create[policy_name] = policy _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) for policy_name in _list: if delete_policies and policy_name not in policies: policies_to_delete.append(policy_name) if policies_to_create or policies_to_delete: _to_modify = list(policies_to_delete) _to_modify.extend(policies_to_create) if __opts__['test']: ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name, policy in six.iteritems(policies_to_create): policy_set = __salt__['boto_iam.create_role_policy'](name, policy_name, policy, region, key, keyid, profile) if not policy_set: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return 
ret for policy_name in policies_to_delete: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(_list), name) return ret def _policies_attached( name, managed_policies=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_attach = [] policies_to_detach = [] for policy in managed_policies or []: entities = __salt__['boto_iam.list_entities_for_policy']( policy, entity_filter='Role', region=region, key=key, keyid=keyid, profile=profile) found = False for roledict in entities.get('policy_roles', []): if name == roledict.get('role_name'): found = True break if not found: policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] for policy_data in _list: if policy_data.get('policy_name') not in managed_policies \ and policy_data.get('policy_arn') not in managed_policies: policies_to_detach.append(policy_data.get('policy_arn')) if policies_to_attach or policies_to_detach: _to_modify = list(policies_to_detach) _to_modify.extend(policies_to_attach) if __opts__['test']: ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_name in policies_to_attach: policy_set = __salt__['boto_iam.attach_role_policy'](policy_name, 
role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_set: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret for policy_name in policies_to_detach: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name, role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] log.debug(newpolicies) ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(newpolicies), name) return ret def absent( name, region=None, key=None, keyid=None, profile=None): ''' Ensure the IAM role is deleted. name Name of the IAM role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} _ret = _policies_absent(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_detached(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_disassociated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _role_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _role_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.role_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be removed.'.format( name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_role'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'role': name} ret['changes']['new'] = {'role': None} ret['comment'] = 'IAM role {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 
'Failed to delete {0} iam role.'.format(name) else: ret['comment'] = '{0} role does not exist.'.format(name) return ret def _instance_profile_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_instance_profile'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'instance_profile': name} ret['changes']['new'] = {'instance_profile': None} ret['comment'] = 'Instance profile {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} instance profile.'.format(name) else: ret['comment'] = '{0} instance profile does not exist.'.format(name) return ret def _policies_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) if not _list: ret['comment'] = 'No policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be removed from role {1}.'.format(', '.join(_list), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name in _list: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies removed from role 
{1}.'.format(', '.join(_list), name) return ret def _policies_detached( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_attached_role_policies']( role_name=name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] if not _list: ret['comment'] = 'No attached policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be detached from role {1}.'.format(', '.join(oldpolicies), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_arn in oldpolicies: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_arn, name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to detach {0} from role {1}'.format(policy_arn, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies detached from role {1}.'.format(', '.join(newpolicies), name) return ret def _instance_profile_disassociated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be disassociated.'.format(name) ret['result'] = None return ret associated = __salt__['boto_iam.disassociate_profile_from_role'](name, name, region, key, 
keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': True} ret['changes']['new'] = {'profile_associated': False} ret['comment'] = 'Instance profile {0} disassociated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to disassociate {0} instance profile from {0} role.'.format(name) return ret
saltstack/salt
salt/states/boto_iam_role.py
_sort_policy
python
def _sort_policy(doc): ''' List-type sub-items in policies don't happen to be order-sensitive, but compare operations will render them unequal, leading to non-idempotent state runs. We'll sort any list-type subitems before comparison to reduce the likelihood of false negatives. ''' if isinstance(doc, list): return sorted([_sort_policy(i) for i in doc]) elif isinstance(doc, (dict, OrderedDict)): return dict([(k, _sort_policy(v)) for k, v in six.iteritems(doc)]) return doc
List-type sub-items in policies don't happen to be order-sensitive, but compare operations will render them unequal, leading to non-idempotent state runs. We'll sort any list-type subitems before comparison to reduce the likelihood of false negatives.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_iam_role.py#L355-L366
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Manage IAM roles ================ .. versionadded:: 2014.7.0 This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit IAM credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml iam.keyid: GKTADJGHEIQSXMKKRBJ08H iam.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 Creating a role will automatically create an instance profile and associate it with the role. This is the default behavior of the AWS console. .. code-block:: yaml myrole: boto_iam_role.present: - region: us-east-1 - key: GKTADJGHEIQSXMKKRBJ08H - keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - policies_from_pillars: - shared_iam_bootstrap_policy - policies: MySQSPolicy: Statement: - Action: - sqs:* Effect: Allow Resource: - arn:aws:sqs:*:*:* Sid: MyPolicySQS1 MyS3Policy: Statement: - Action: - s3:GetObject Effect: Allow Resource: - arn:aws:s3:*:*:mybucket/* # Using a credentials profile from pillars myrole: boto_iam_role.present: - profile: myiamprofile # Passing in a credentials profile myrole: boto_iam_role.present: - profile: key: GKTADJGHEIQSXMKKRBJ08H keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 If ``delete_policies: False`` is specified, existing policies that are not in the given list of policies will not be deleted. 
This allows manual modifications on the IAM role to be persistent. This functionality was added in 2015.8.0. .. note:: When using the ``profile`` parameter and ``region`` is set outside of the profile group, region is ignored and a default region will be used. If ``region`` is missing from the ``profile`` data set, ``us-east-1`` will be used as the default region. ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.dictupdate as dictupdate from salt.utils.odict import OrderedDict import salt.utils.dictdiffer from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_iam_role' if 'boto_iam.role_exists' in __salt__ else False def present( name, policy_document=None, policy_document_from_pillars=None, path=None, policies=None, policies_from_pillars=None, managed_policies=None, create_instance_profile=True, region=None, key=None, keyid=None, profile=None, delete_policies=True): ''' Ensure the IAM role exists. name Name of the IAM role. policy_document The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policy_document_from_pillars A pillar key that contains a role policy document. The statements defined here will be appended with the policy document statements defined in the policy_document argument. .. versionadded:: 2017.7.0 path The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policies A dict of IAM role policies. policies_from_pillars A list of pillars that contain role policy dicts. Policies in the pillars will be merged in the order defined in the list and key conflicts will be handled by later defined keys overriding earlier defined keys. The policies defined here will be merged with the policies defined in the policies argument. 
If keys conflict, the keys in the policies argument will override the keys defined in policies_from_pillars. managed_policies A list of (AWS or Customer) managed policies to be attached to the role. create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. delete_policies Deletes existing policies that are not in the given list of policies. Default value is ``True``. If ``False`` is specified, existing policies will not be deleted allowing manual modifications on the IAM role to be persistent. .. versionadded:: 2015.8.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Build up _policy_document _policy_document = {} if policy_document_from_pillars: from_pillars = __salt__['pillar.get'](policy_document_from_pillars) if from_pillars: _policy_document['Version'] = from_pillars['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(from_pillars['Statement']) if policy_document: _policy_document['Version'] = policy_document['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(policy_document['Statement']) _ret = _role_present(name, _policy_document, path, region, key, keyid, profile) # Build up _policies if not policies: policies = {} if not policies_from_pillars: policies_from_pillars = [] if not managed_policies: managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) _policies.update(_policy) _policies.update(policies) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if create_instance_profile: _ret = 
_instance_profile_present(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_associated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, _policies, region, key, keyid, profile, delete_policies) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] _ret = _policies_attached(name, managed_policies, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _role_present( name, policy_document=None, path=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} role = __salt__['boto_iam.describe_role'](name, region, key, keyid, profile) if not role: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_role'](name, policy_document, path, region, key, keyid, profile) if created: ret['changes']['old'] = {'role': None} ret['changes']['new'] = {'role': name} ret['comment'] = 'IAM role {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} IAM role.'.format(name) else: ret['comment'] = '{0} role present.'.format(name) if not policy_document: _policy_document = __salt__['boto_iam.build_policy']( region, key, keyid, profile) else: 
_policy_document = policy_document if salt.utils.dictdiffer.deep_diff( _sort_policy(role['assume_role_policy_document']), _sort_policy(_policy_document)): if __opts__['test']: msg = 'Assume role policy document to be updated.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['result'] = None return ret updated = __salt__['boto_iam.update_assume_role_policy']( name, _policy_document, region, key, keyid, profile ) if updated: msg = 'Assume role policy document updated.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['changes']['old'] = {'policy_document': role['assume_role_policy_document']} ret['changes']['new'] = {'policy_document': _policy_document} else: ret['result'] = False msg = 'Failed to update assume role policy.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) return ret def _instance_profile_present( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_instance_profile'](name, region, key, keyid, profile) if created: ret['changes']['old'] = {'instance_profile': None} ret['changes']['new'] = {'instance_profile': name} ret['comment'] = 'Instance profile {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} instance profile.'.format(name) return ret def _instance_profile_associated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if not is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be associated.'.format(name) ret['result'] = None return ret associated = 
__salt__['boto_iam.associate_profile_to_role'](name, name, region, key, keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': None} ret['changes']['new'] = {'profile_associated': True} ret['comment'] = 'Instance profile {0} associated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to associate {0} instance profile with {0} role.'.format(name) return ret def _policies_present( name, policies=None, region=None, key=None, keyid=None, profile=None, delete_policies=True): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_create = {} policies_to_delete = [] for policy_name, policy in six.iteritems(policies): _policy = __salt__['boto_iam.get_role_policy'](name, policy_name, region, key, keyid, profile) if _policy != policy: policies_to_create[policy_name] = policy _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) for policy_name in _list: if delete_policies and policy_name not in policies: policies_to_delete.append(policy_name) if policies_to_create or policies_to_delete: _to_modify = list(policies_to_delete) _to_modify.extend(policies_to_create) if __opts__['test']: ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name, policy in six.iteritems(policies_to_create): policy_set = __salt__['boto_iam.create_role_policy'](name, policy_name, policy, region, key, keyid, profile) if not policy_set: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret for policy_name in policies_to_delete: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, 
key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(_list), name) return ret def _policies_attached( name, managed_policies=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_attach = [] policies_to_detach = [] for policy in managed_policies or []: entities = __salt__['boto_iam.list_entities_for_policy']( policy, entity_filter='Role', region=region, key=key, keyid=keyid, profile=profile) found = False for roledict in entities.get('policy_roles', []): if name == roledict.get('role_name'): found = True break if not found: policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] for policy_data in _list: if policy_data.get('policy_name') not in managed_policies \ and policy_data.get('policy_arn') not in managed_policies: policies_to_detach.append(policy_data.get('policy_arn')) if policies_to_attach or policies_to_detach: _to_modify = list(policies_to_detach) _to_modify.extend(policies_to_attach) if __opts__['test']: ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_name in policies_to_attach: policy_set = __salt__['boto_iam.attach_role_policy'](policy_name, role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_set: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = 
[x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret for policy_name in policies_to_detach: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name, role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] log.debug(newpolicies) ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(newpolicies), name) return ret def absent( name, region=None, key=None, keyid=None, profile=None): ''' Ensure the IAM role is deleted. name Name of the IAM role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} _ret = _policies_absent(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_detached(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_disassociated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _role_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _role_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.role_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be removed.'.format( name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_role'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'role': name} ret['changes']['new'] = {'role': None} ret['comment'] = 'IAM role {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 
'Failed to delete {0} iam role.'.format(name) else: ret['comment'] = '{0} role does not exist.'.format(name) return ret def _instance_profile_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_instance_profile'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'instance_profile': name} ret['changes']['new'] = {'instance_profile': None} ret['comment'] = 'Instance profile {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} instance profile.'.format(name) else: ret['comment'] = '{0} instance profile does not exist.'.format(name) return ret def _policies_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) if not _list: ret['comment'] = 'No policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be removed from role {1}.'.format(', '.join(_list), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name in _list: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies removed from role 
{1}.'.format(', '.join(_list), name) return ret def _policies_detached( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_attached_role_policies']( role_name=name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] if not _list: ret['comment'] = 'No attached policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be detached from role {1}.'.format(', '.join(oldpolicies), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_arn in oldpolicies: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_arn, name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to detach {0} from role {1}'.format(policy_arn, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies detached from role {1}.'.format(', '.join(newpolicies), name) return ret def _instance_profile_disassociated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be disassociated.'.format(name) ret['result'] = None return ret associated = __salt__['boto_iam.disassociate_profile_from_role'](name, name, region, key, 
keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': True} ret['changes']['new'] = {'profile_associated': False} ret['comment'] = 'Instance profile {0} disassociated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to disassociate {0} instance profile from {0} role.'.format(name) return ret
saltstack/salt
salt/states/boto_iam_role.py
absent
python
def absent( name, region=None, key=None, keyid=None, profile=None): ''' Ensure the IAM role is deleted. name Name of the IAM role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} _ret = _policies_absent(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_detached(name, region, key, keyid, profile) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_disassociated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _role_absent(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret
Ensure the IAM role is deleted. name Name of the IAM role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_iam_role.py#L511-L570
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def _policies_absent(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid,\n profile)\n if not _list:\n 
ret['comment'] = 'No policies in role {0}.'.format(name)\n return ret\n if __opts__['test']:\n ret['comment'] = '{0} policies to be removed from role {1}.'.format(', '.join(_list), name)\n ret['result'] = None\n return ret\n ret['changes']['old'] = {'policies': _list}\n for policy_name in _list:\n policy_unset = __salt__['boto_iam.delete_role_policy'](name,\n policy_name,\n region, key,\n keyid,\n profile)\n if not policy_unset:\n _list = __salt__['boto_iam.list_role_policies'](name, region,\n key, keyid,\n profile)\n ret['changes']['new'] = {'policies': _list}\n ret['result'] = False\n ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name)\n return ret\n _list = __salt__['boto_iam.list_role_policies'](name, region, key,\n keyid, profile)\n ret['changes']['new'] = {'policies': _list}\n ret['comment'] = '{0} policies removed from role {1}.'.format(', '.join(_list), name)\n return ret\n", "def _policies_detached(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n _list = __salt__['boto_iam.list_attached_role_policies'](\n role_name=name, region=region, key=key, keyid=keyid, profile=profile)\n oldpolicies = [x.get('policy_arn') for x in _list]\n if not _list:\n ret['comment'] = 'No attached policies in role {0}.'.format(name)\n return ret\n if __opts__['test']:\n ret['comment'] = '{0} policies to be detached from role {1}.'.format(', '.join(oldpolicies), name)\n ret['result'] = None\n return ret\n ret['changes']['old'] = {'managed_policies': oldpolicies}\n for policy_arn in oldpolicies:\n policy_unset = __salt__['boto_iam.detach_role_policy'](policy_arn,\n name,\n region=region,\n key=key,\n keyid=keyid,\n profile=profile)\n if not policy_unset:\n _list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n newpolicies = [x.get('policy_arn') for x in _list]\n ret['changes']['new'] = {'managed_policies': 
newpolicies}\n ret['result'] = False\n ret['comment'] = 'Failed to detach {0} from role {1}'.format(policy_arn, name)\n return ret\n _list = __salt__['boto_iam.list_attached_role_policies'](\n name, region=region, key=key, keyid=keyid, profile=profile)\n newpolicies = [x.get('policy_arn') for x in _list]\n ret['changes']['new'] = {'managed_policies': newpolicies}\n ret['comment'] = '{0} policies detached from role {1}.'.format(', '.join(newpolicies), name)\n return ret\n", "def _instance_profile_disassociated(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n is_associated = __salt__['boto_iam.profile_associated'](name, name, region,\n key, keyid,\n profile)\n if is_associated:\n if __opts__['test']:\n ret['comment'] = 'Instance profile {0} is set to be disassociated.'.format(name)\n ret['result'] = None\n return ret\n associated = __salt__['boto_iam.disassociate_profile_from_role'](name, name, region, key, keyid, profile)\n if associated:\n ret['changes']['old'] = {'profile_associated': True}\n ret['changes']['new'] = {'profile_associated': False}\n ret['comment'] = 'Instance profile {0} disassociated.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to disassociate {0} instance profile from {0} role.'.format(name)\n return ret\n", "def _instance_profile_absent(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n\n exists = __salt__['boto_iam.instance_profile_exists'](name, region, key,\n keyid, profile)\n if exists:\n if __opts__['test']:\n ret['comment'] = 'Instance profile {0} is set to be removed.'.format(name)\n ret['result'] = None\n return ret\n deleted = __salt__['boto_iam.delete_instance_profile'](name, region,\n key, keyid,\n profile)\n if deleted:\n ret['changes']['old'] = {'instance_profile': name}\n ret['changes']['new'] = {'instance_profile': None}\n ret['comment'] = 'Instance profile 
{0} removed.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to delete {0} instance profile.'.format(name)\n else:\n ret['comment'] = '{0} instance profile does not exist.'.format(name)\n return ret\n", "def _role_absent(\n name,\n region=None,\n key=None,\n keyid=None,\n profile=None):\n ret = {'result': True, 'comment': '', 'changes': {}}\n\n exists = __salt__['boto_iam.role_exists'](name, region, key, keyid,\n profile)\n if exists:\n if __opts__['test']:\n ret['comment'] = 'IAM role {0} is set to be removed.'.format(\n name)\n ret['result'] = None\n return ret\n deleted = __salt__['boto_iam.delete_role'](name, region, key, keyid,\n profile)\n if deleted:\n ret['changes']['old'] = {'role': name}\n ret['changes']['new'] = {'role': None}\n ret['comment'] = 'IAM role {0} removed.'.format(name)\n else:\n ret['result'] = False\n ret['comment'] = 'Failed to delete {0} iam role.'.format(name)\n else:\n ret['comment'] = '{0} role does not exist.'.format(name)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage IAM roles ================ .. versionadded:: 2014.7.0 This module uses ``boto``, which can be installed via package, or pip. This module accepts explicit IAM credentials but can also utilize IAM roles assigned to the instance through Instance Profiles. Dynamic credentials are then automatically obtained from AWS API and no further configuration is necessary. More information available `here <http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_. If IAM roles are not used you need to specify them either in a pillar file or in the minion's config file: .. code-block:: yaml iam.keyid: GKTADJGHEIQSXMKKRBJ08H iam.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either passed in as a dict, or as a string to pull from pillars or minion config: .. code-block:: yaml myprofile: keyid: GKTADJGHEIQSXMKKRBJ08H key: askjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 Creating a role will automatically create an instance profile and associate it with the role. This is the default behavior of the AWS console. .. code-block:: yaml myrole: boto_iam_role.present: - region: us-east-1 - key: GKTADJGHEIQSXMKKRBJ08H - keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs - policies_from_pillars: - shared_iam_bootstrap_policy - policies: MySQSPolicy: Statement: - Action: - sqs:* Effect: Allow Resource: - arn:aws:sqs:*:*:* Sid: MyPolicySQS1 MyS3Policy: Statement: - Action: - s3:GetObject Effect: Allow Resource: - arn:aws:s3:*:*:mybucket/* # Using a credentials profile from pillars myrole: boto_iam_role.present: - profile: myiamprofile # Passing in a credentials profile myrole: boto_iam_role.present: - profile: key: GKTADJGHEIQSXMKKRBJ08H keyid: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs region: us-east-1 If ``delete_policies: False`` is specified, existing policies that are not in the given list of policies will not be deleted. 
This allows manual modifications on the IAM role to be persistent. This functionality was added in 2015.8.0. .. note:: When using the ``profile`` parameter and ``region`` is set outside of the profile group, region is ignored and a default region will be used. If ``region`` is missing from the ``profile`` data set, ``us-east-1`` will be used as the default region. ''' from __future__ import absolute_import, print_function, unicode_literals import logging import salt.utils.dictupdate as dictupdate from salt.utils.odict import OrderedDict import salt.utils.dictdiffer from salt.ext import six log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' return 'boto_iam_role' if 'boto_iam.role_exists' in __salt__ else False def present( name, policy_document=None, policy_document_from_pillars=None, path=None, policies=None, policies_from_pillars=None, managed_policies=None, create_instance_profile=True, region=None, key=None, keyid=None, profile=None, delete_policies=True): ''' Ensure the IAM role exists. name Name of the IAM role. policy_document The policy that grants an entity permission to assume the role. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policy_document_from_pillars A pillar key that contains a role policy document. The statements defined here will be appended with the policy document statements defined in the policy_document argument. .. versionadded:: 2017.7.0 path The path to the role/instance profile. (See https://boto.readthedocs.io/en/latest/ref/iam.html#boto.iam.connection.IAMConnection.create_role) policies A dict of IAM role policies. policies_from_pillars A list of pillars that contain role policy dicts. Policies in the pillars will be merged in the order defined in the list and key conflicts will be handled by later defined keys overriding earlier defined keys. The policies defined here will be merged with the policies defined in the policies argument. 
If keys conflict, the keys in the policies argument will override the keys defined in policies_from_pillars. managed_policies A list of (AWS or Customer) managed policies to be attached to the role. create_instance_profile A boolean of whether or not to create an instance profile and associate it with this role. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. delete_policies Deletes existing policies that are not in the given list of policies. Default value is ``True``. If ``False`` is specified, existing policies will not be deleted allowing manual modifications on the IAM role to be persistent. .. versionadded:: 2015.8.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} # Build up _policy_document _policy_document = {} if policy_document_from_pillars: from_pillars = __salt__['pillar.get'](policy_document_from_pillars) if from_pillars: _policy_document['Version'] = from_pillars['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(from_pillars['Statement']) if policy_document: _policy_document['Version'] = policy_document['Version'] _policy_document.setdefault('Statement', []) _policy_document['Statement'].extend(policy_document['Statement']) _ret = _role_present(name, _policy_document, path, region, key, keyid, profile) # Build up _policies if not policies: policies = {} if not policies_from_pillars: policies_from_pillars = [] if not managed_policies: managed_policies = [] _policies = {} for policy in policies_from_pillars: _policy = __salt__['pillar.get'](policy) _policies.update(_policy) _policies.update(policies) ret['changes'] = _ret['changes'] ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret if create_instance_profile: _ret = 
_instance_profile_present(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _instance_profile_associated(name, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _policies_present(name, _policies, region, key, keyid, profile, delete_policies) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] _ret = _policies_attached(name, managed_policies, region, key, keyid, profile) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret def _role_present( name, policy_document=None, path=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} role = __salt__['boto_iam.describe_role'](name, region, key, keyid, profile) if not role: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_role'](name, policy_document, path, region, key, keyid, profile) if created: ret['changes']['old'] = {'role': None} ret['changes']['new'] = {'role': name} ret['comment'] = 'IAM role {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} IAM role.'.format(name) else: ret['comment'] = '{0} role present.'.format(name) if not policy_document: _policy_document = __salt__['boto_iam.build_policy']( region, key, keyid, profile) else: 
_policy_document = policy_document if salt.utils.dictdiffer.deep_diff( _sort_policy(role['assume_role_policy_document']), _sort_policy(_policy_document)): if __opts__['test']: msg = 'Assume role policy document to be updated.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['result'] = None return ret updated = __salt__['boto_iam.update_assume_role_policy']( name, _policy_document, region, key, keyid, profile ) if updated: msg = 'Assume role policy document updated.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) ret['changes']['old'] = {'policy_document': role['assume_role_policy_document']} ret['changes']['new'] = {'policy_document': _policy_document} else: ret['result'] = False msg = 'Failed to update assume role policy.' ret['comment'] = '{0} {1}'.format(ret['comment'], msg) return ret def _instance_profile_present( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if not exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be created.'.format(name) ret['result'] = None return ret created = __salt__['boto_iam.create_instance_profile'](name, region, key, keyid, profile) if created: ret['changes']['old'] = {'instance_profile': None} ret['changes']['new'] = {'instance_profile': name} ret['comment'] = 'Instance profile {0} created.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to create {0} instance profile.'.format(name) return ret def _instance_profile_associated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if not is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be associated.'.format(name) ret['result'] = None return ret associated = 
__salt__['boto_iam.associate_profile_to_role'](name, name, region, key, keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': None} ret['changes']['new'] = {'profile_associated': True} ret['comment'] = 'Instance profile {0} associated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to associate {0} instance profile with {0} role.'.format(name) return ret def _sort_policy(doc): ''' List-type sub-items in policies don't happen to be order-sensitive, but compare operations will render them unequal, leading to non-idempotent state runs. We'll sort any list-type subitems before comparison to reduce the likelihood of false negatives. ''' if isinstance(doc, list): return sorted([_sort_policy(i) for i in doc]) elif isinstance(doc, (dict, OrderedDict)): return dict([(k, _sort_policy(v)) for k, v in six.iteritems(doc)]) return doc def _policies_present( name, policies=None, region=None, key=None, keyid=None, profile=None, delete_policies=True): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_create = {} policies_to_delete = [] for policy_name, policy in six.iteritems(policies): _policy = __salt__['boto_iam.get_role_policy'](name, policy_name, region, key, keyid, profile) if _policy != policy: policies_to_create[policy_name] = policy _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) for policy_name in _list: if delete_policies and policy_name not in policies: policies_to_delete.append(policy_name) if policies_to_create or policies_to_delete: _to_modify = list(policies_to_delete) _to_modify.extend(policies_to_create) if __opts__['test']: ret['comment'] = '{0} policies to be modified on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name, policy in six.iteritems(policies_to_create): policy_set = __salt__['boto_iam.create_role_policy'](name, policy_name, policy, region, key, keyid, profile) if not 
policy_set: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret for policy_name in policies_to_delete: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(_list), name) return ret def _policies_attached( name, managed_policies=None, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} policies_to_attach = [] policies_to_detach = [] for policy in managed_policies or []: entities = __salt__['boto_iam.list_entities_for_policy']( policy, entity_filter='Role', region=region, key=key, keyid=keyid, profile=profile) found = False for roledict in entities.get('policy_roles', []): if name == roledict.get('role_name'): found = True break if not found: policies_to_attach.append(policy) _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] for policy_data in _list: if policy_data.get('policy_name') not in managed_policies \ and policy_data.get('policy_arn') not in managed_policies: policies_to_detach.append(policy_data.get('policy_arn')) if policies_to_attach or policies_to_detach: _to_modify = list(policies_to_detach) _to_modify.extend(policies_to_attach) if __opts__['test']: ret['comment'] = '{0} policies to be modified 
on role {1}.'.format(', '.join(_to_modify), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_name in policies_to_attach: policy_set = __salt__['boto_iam.attach_role_policy'](policy_name, role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_set: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret for policy_name in policies_to_detach: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_name, role_name=name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to remove policy {0} from role {1}'.format(policy_name, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] log.debug(newpolicies) ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies modified on role {1}.'.format(', '.join(newpolicies), name) return ret def _role_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.role_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'IAM role {0} is set to be removed.'.format( name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_role'](name, region, key, keyid, profile) if deleted: 
ret['changes']['old'] = {'role': name} ret['changes']['new'] = {'role': None} ret['comment'] = 'IAM role {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} iam role.'.format(name) else: ret['comment'] = '{0} role does not exist.'.format(name) return ret def _instance_profile_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} exists = __salt__['boto_iam.instance_profile_exists'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'Instance profile {0} is set to be removed.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_iam.delete_instance_profile'](name, region, key, keyid, profile) if deleted: ret['changes']['old'] = {'instance_profile': name} ret['changes']['new'] = {'instance_profile': None} ret['comment'] = 'Instance profile {0} removed.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to delete {0} instance profile.'.format(name) else: ret['comment'] = '{0} instance profile does not exist.'.format(name) return ret def _policies_absent( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) if not _list: ret['comment'] = 'No policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be removed from role {1}.'.format(', '.join(_list), name) ret['result'] = None return ret ret['changes']['old'] = {'policies': _list} for policy_name in _list: policy_unset = __salt__['boto_iam.delete_role_policy'](name, policy_name, region, key, keyid, profile) if not policy_unset: _list = __salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['result'] = False ret['comment'] = 'Failed to add policy {0} to role {1}'.format(policy_name, name) return ret _list = 
__salt__['boto_iam.list_role_policies'](name, region, key, keyid, profile) ret['changes']['new'] = {'policies': _list} ret['comment'] = '{0} policies removed from role {1}.'.format(', '.join(_list), name) return ret def _policies_detached( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} _list = __salt__['boto_iam.list_attached_role_policies']( role_name=name, region=region, key=key, keyid=keyid, profile=profile) oldpolicies = [x.get('policy_arn') for x in _list] if not _list: ret['comment'] = 'No attached policies in role {0}.'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} policies to be detached from role {1}.'.format(', '.join(oldpolicies), name) ret['result'] = None return ret ret['changes']['old'] = {'managed_policies': oldpolicies} for policy_arn in oldpolicies: policy_unset = __salt__['boto_iam.detach_role_policy'](policy_arn, name, region=region, key=key, keyid=keyid, profile=profile) if not policy_unset: _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['result'] = False ret['comment'] = 'Failed to detach {0} from role {1}'.format(policy_arn, name) return ret _list = __salt__['boto_iam.list_attached_role_policies']( name, region=region, key=key, keyid=keyid, profile=profile) newpolicies = [x.get('policy_arn') for x in _list] ret['changes']['new'] = {'managed_policies': newpolicies} ret['comment'] = '{0} policies detached from role {1}.'.format(', '.join(newpolicies), name) return ret def _instance_profile_disassociated( name, region=None, key=None, keyid=None, profile=None): ret = {'result': True, 'comment': '', 'changes': {}} is_associated = __salt__['boto_iam.profile_associated'](name, name, region, key, keyid, profile) if is_associated: if __opts__['test']: ret['comment'] = 'Instance profile {0} 
is set to be disassociated.'.format(name) ret['result'] = None return ret associated = __salt__['boto_iam.disassociate_profile_from_role'](name, name, region, key, keyid, profile) if associated: ret['changes']['old'] = {'profile_associated': True} ret['changes']['new'] = {'profile_associated': False} ret['comment'] = 'Instance profile {0} disassociated.'.format(name) else: ret['result'] = False ret['comment'] = 'Failed to disassociate {0} instance profile from {0} role.'.format(name) return ret